Index: projects/clang700-import/ObsoleteFiles.inc
===================================================================
--- projects/clang700-import/ObsoleteFiles.inc (revision 340124)
+++ projects/clang700-import/ObsoleteFiles.inc (revision 340125)
@@ -1,10753 +1,10753 @@
#
# $FreeBSD$
#
# This file lists old files (OLD_FILES), libraries (OLD_LIBS) and
# directories (OLD_DIRS) which should be removed on an update. Recently
# removed entries come first (with the date as a comment). Dynamic libraries
# are special cased (OLD_LIBS). Static libraries and the generic links to
# the dynamic libraries (lib*.so) should be treated as normal files
# (OLD_FILES); if you do not know of a reason to make an exception, consider
# this a "must".
#
# In the case of a complete directory hierarchy, the sorting is in
# depth-first order.
#
# The file is partitioned: OLD_FILES first, then OLD_LIBS, and OLD_DIRS last.
#
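# For illustration, a typical entry follows the conventions above: a dated
# comment, then the files, then any now-empty directories (depth first).
# The entry below is a hypothetical sketch (made-up date and paths), kept
# commented out:
#
# # 20181231: example(4) removal
# OLD_FILES+=usr/share/man/man4/example.4.gz
# OLD_FILES+=usr/include/dev/example/if_example.h
# OLD_DIRS+=usr/include/dev/example
#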
# Before you commit changes to this file, please check whether any entries
# in tools/build/mk/OptionalObsoleteFiles.inc can be removed. The following
# command reports which files are listed more than once, regardless of any
# architecture-specific conditionals, so the output cannot be trusted
# blindly:
# ( grep '+=' /usr/src/ObsoleteFiles.inc | sort -u ; \
# grep '+=' /usr/src/tools/build/mk/OptionalObsoleteFiles.inc | sort -u) | \
# sort | uniq -d
#
# To find regular duplicates that do not depend on optional components, you
# can also use something that will not give false positives, e.g.:
# for t in `make -V TARGETS universe`; do
# __MAKE_CONF=/dev/null make -f Makefile.inc1 TARGET=$t \
# -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \
# xargs -n1 | sort | uniq -d;
# done
#
# For optional components, you can use the following to see whether some
# entries in OptionalObsoleteFiles.inc have been obsoleted by
# ObsoleteFiles.inc:
# for o in tools/build/options/WITH*; do
# __MAKE_CONF=/dev/null make -f Makefile.inc1 -D${o##*/} \
# -V OLD_FILES -V OLD_LIBS -V OLD_DIRS check-old | \
# xargs -n1 | sort | uniq -d;
# done
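# The lists above are consumed by the check-old and delete-old targets in
# Makefile.inc1 (see build(7)). A minimal sketch of the usual sequence, run
# from /usr/src after installworld:
# make check-old        # report obsolete files, libraries and directories
# make delete-old       # remove obsolete files and directories
# make delete-old-libs  # remove obsolete libraries once nothing needs them
#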
-# 2018mmdd: new clang import which bumps version from 6.0.1 to 7.0.0.
+# 2018mmdd: new clang import which bumps version from 6.0.1 to 7.0.1.
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/6.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/6.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/6.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/6.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm64intr.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/6.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/6.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/cetintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/6.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/6.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/6.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/6.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/6.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/6.0.1/include
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/6.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/6.0.1/lib
OLD_DIRS+=usr/lib/clang/6.0.1
# 20181026: joy(4) removal
OLD_FILES+=usr/share/man/man4/joy.4.gz
# 20181025: OpenSSL libraries version bump to avoid conflict with ports
OLD_LIBS+=lib/libcrypto.so.9
OLD_LIBS+=usr/lib/libssl.so.9
OLD_LIBS+=usr/lib32/libcrypto.so.9
OLD_LIBS+=usr/lib32/libssl.so.9
# 20181021: mse(4) removal
OLD_FILES+=usr/share/man/man4/mse.4.gz
# 20181015: Stale libcasper(3) files following r329452
OLD_LIBS+=lib/casper/libcap_sysctl.so.0
OLD_LIBS+=lib/casper/libcap_grp.so.0
OLD_LIBS+=lib/casper/libcap_pwd.so.0
OLD_LIBS+=lib/casper/libcap_random.so.0
OLD_LIBS+=lib/casper/libcap_dns.so.0
OLD_LIBS+=lib/casper/libcap_syslog.so.0
OLD_LIBS+=usr/lib32/libcap_sysctl.so.0
OLD_LIBS+=usr/lib32/libcap_grp.so.0
OLD_LIBS+=usr/lib32/libcap_pwd.so.0
OLD_LIBS+=usr/lib32/libcap_random.so.0
OLD_LIBS+=usr/lib32/libcap_dns.so.0
OLD_LIBS+=usr/lib32/libcap_syslog.so.0
# 20181009: OpenSSL 1.1.1
OLD_FILES+=usr/include/openssl/des_old.h
OLD_FILES+=usr/include/openssl/dso.h
OLD_FILES+=usr/include/openssl/krb5_asn.h
OLD_FILES+=usr/include/openssl/kssl.h
OLD_FILES+=usr/include/openssl/pqueue.h
OLD_FILES+=usr/include/openssl/ssl23.h
OLD_FILES+=usr/include/openssl/ui_compat.h
OLD_FILES+=usr/share/openssl/man/man1/dss1.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md2.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md4.1.gz
OLD_FILES+=usr/share/openssl/man/man1/md5.1.gz
OLD_FILES+=usr/share/openssl/man/man1/mdc2.1.gz
OLD_FILES+=usr/share/openssl/man/man1/ripemd160.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha1.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha224.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha256.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha384.1.gz
OLD_FILES+=usr/share/openssl/man/man1/sha512.1.gz
OLD_FILES+=usr/share/openssl/man/man1/x509v3_config.1.gz
OLD_FILES+=usr/share/openssl/man/man3/ASN1_STRING_length_set.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_int_port.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_get_conn_ip.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_int_port.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BIO_set_conn_ip.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_get_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_set_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_BLINDING_thread_id.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_MONT_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_RECP_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BN_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_memdup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strdup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcat.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strlcpy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/BUF_strndup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_cert.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cmp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_cpy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_current.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_get_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_hash.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_THREADID_set_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_destroy_dynlockid.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_get_new_dynlockid.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_lock.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_num_locks.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_create_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_destroy_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_dynlock_lock_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_locking_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_ede3_cbcm_encrypt.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_enc_read.3.gz
OLD_FILES+=usr/share/openssl/man/man3/DES_enc_write.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_get_key_method_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_KEY_insert_key_method_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EC_POINT_set_Jprojective_coordinates.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ERR_load_UI_strings.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_CIPHER_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MAX_MD_SIZE.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_create.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_destroy.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_MD_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEVP_PKEY_CTX_set_app_data.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_CTX_set_rsa_rsa_keygen_bits.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_get_default_digest.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_dss.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_dss1.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_sha.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_CTX_init.3.gz
OLD_FILES+=usr/share/openssl/man/man3/HMAC_cleanup.3.gz
OLD_FILES+=usr/share/openssl/man/man3/OPENSSL_ia32cap_loc.3.gz
OLD_FILES+=usr/share/openssl/man/man3/PEM.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RAND_SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RSA_PKCS1_SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/RSA_null_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_get_ex_new_index.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_need_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_custom_cli_ext.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_default_read_ahead.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_ecdh_auto.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_SESSION_get_ex_new_index.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_add_session.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_flush_sessions.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_accept_state.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_ex_new_index.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_get_msg_callback_arg.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_need_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_remove_session.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_ecdh_auto.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSL_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay_add_ssl_algorithms.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLeay_version.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_client_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/SSLv2_server_method.3.gz
OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_set_chain.3.gz
OLD_FILES+=usr/share/openssl/man/man3/X509_STORE_CTX_trusted_stack.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/blowfish.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_add_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_check_top.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_cmp_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_div_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_dump.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_expand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_expand2.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_fix_top.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_internal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_add_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_comba8.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_high.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_low_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_part_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_mul_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_print.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_high.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_low.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_set_max.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_comba8.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_normal.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_recursive.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sqr_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_sub_words.3.gz
OLD_FILES+=usr/share/openssl/man/man3/bn_wexpand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/buffer.3.gz
OLD_FILES+=usr/share/openssl/man/man3/crypto.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPKParameters_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_ECPrivate_key.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_Netscape_RSA.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_PKCS8PrivateKey.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_Private_key.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_X509_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_X509_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_2passwords.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_pw.3.gz
OLD_FILES+=usr/share/openssl/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/openssl/man/man3/dh.3.gz
OLD_FILES+=usr/share/openssl/man/man3/dsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ec.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ecdsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/engine.3.gz
OLD_FILES+=usr/share/openssl/man/man3/err.3.gz
OLD_FILES+=usr/share/openssl/man/man3/evp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/hmac.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_ECPKParameters_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_Netscape_RSA.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_X509_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/i2d_X509_fp.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_delete.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_doall.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_doall_arg.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_error.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_free.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_insert.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_new.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_node_usage_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_retrieve.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_stats.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lh_stats_bio.3.gz
OLD_FILES+=usr/share/openssl/man/man3/lhash.3.gz
OLD_FILES+=usr/share/openssl/man/man3/md5.3.gz
OLD_FILES+=usr/share/openssl/man/man3/mdc2.3.gz
OLD_FILES+=usr/share/openssl/man/man3/pem.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rand.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rc4.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ripemd.3.gz
OLD_FILES+=usr/share/openssl/man/man3/rsa.3.gz
OLD_FILES+=usr/share/openssl/man/man3/sha.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ssl.3.gz
OLD_FILES+=usr/share/openssl/man/man3/threads.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ui.3.gz
OLD_FILES+=usr/share/openssl/man/man3/ui_compat.3.gz
OLD_FILES+=usr/share/openssl/man/man3/x509.3.gz
OLD_LIBS+=lib/libcrypto.so.8
OLD_LIBS+=usr/lib/engines/lib4758cca.so
OLD_LIBS+=usr/lib/engines/libaep.so
OLD_LIBS+=usr/lib/engines/libatalla.so
OLD_LIBS+=usr/lib/engines/libcapi.so
OLD_LIBS+=usr/lib/engines/libchil.so
OLD_LIBS+=usr/lib/engines/libcswift.so
OLD_LIBS+=usr/lib/engines/libgost.so
OLD_LIBS+=usr/lib/engines/libnuron.so
OLD_LIBS+=usr/lib/engines/libsureware.so
OLD_LIBS+=usr/lib/engines/libubsec.so
OLD_LIBS+=usr/lib/libssl.so.8
OLD_LIBS+=usr/lib32/libcrypto.so.8
OLD_LIBS+=usr/lib32/lib4758cca.so
OLD_LIBS+=usr/lib32/libaep.so
OLD_LIBS+=usr/lib32/libatalla.so
OLD_LIBS+=usr/lib32/libcapi.so
OLD_LIBS+=usr/lib32/libchil.so
OLD_LIBS+=usr/lib32/libcswift.so
OLD_LIBS+=usr/lib32/libgost.so
OLD_LIBS+=usr/lib32/libnuron.so
OLD_LIBS+=usr/lib32/libsureware.so
OLD_LIBS+=usr/lib32/libubsec.so
OLD_LIBS+=usr/lib32/libssl.so.8
# 20180824: libbe(3) SHLIBDIR fixed to reflect correct location
OLD_LIBS+=usr/lib/libbe.so.1
# 20180819: Remove deprecated arc4random(3) stir/addrandom interfaces
OLD_FILES+=usr/share/man/man3/arc4random_addrandom.3.gz
OLD_FILES+=usr/share/man/man3/arc4random_stir.3.gz
# 20180819: send-pr(1) placeholder removal
OLD_FILES+=usr/bin/send-pr
# 20180725: Cleanup old libcasper.so.0
OLD_LIBS+=lib/libcasper.so.0
# 20180722: indent(1) option renamed, test files follow
OLD_FILES+=usr/bin/indent/tests/nsac.0
OLD_FILES+=usr/bin/indent/tests/nsac.0.pro
OLD_FILES+=usr/bin/indent/tests/nsac.0.stdout
OLD_FILES+=usr/bin/indent/tests/sac.0
OLD_FILES+=usr/bin/indent/tests/sac.0.pro
OLD_FILES+=usr/bin/indent/tests/sac.0.stdout
# 20180721: move of libmlx5.so.1 and libibverbs.so.1
OLD_LIBS+=usr/lib/libmlx5.so.1
OLD_LIBS+=usr/lib/libibverbs.so.1
# 20180710: old numa cleanup
OLD_FILES+=usr/include/sys/numa.h
OLD_FILES+=usr/share/man/man2/numa_getaffinity.2.gz
OLD_FILES+=usr/share/man/man2/numa_setaffinity.2.gz
OLD_FILES+=usr/share/man/man1/numactl.1.gz
OLD_FILES+=usr/bin/numactl
# 20180630: new clang import which bumps version from 6.0.0 to 6.0.1.
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/hwasan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/scudo_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/6.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/6.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/6.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/6.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/6.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm64intr.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/6.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/6.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbitalgintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvbmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vlvnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vnniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avx512vpopcntdqvlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/cetintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clwbintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/6.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/gfniintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/6.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/6.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/6.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vaesintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/vpclmulqdqintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/6.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/6.0.0/include
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/6.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/6.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/6.0.0/lib
OLD_DIRS+=usr/lib/clang/6.0.0
# 20180615: asf(8) removed
OLD_FILES+=usr/sbin/asf
OLD_FILES+=usr/share/man/man8/asf.8.gz
# 20180609: obsolete libc++ files missed from the 5.0.0 import
OLD_FILES+=usr/include/c++/v1/__refstring
OLD_FILES+=usr/include/c++/v1/__undef_min_max
OLD_FILES+=usr/include/c++/v1/tr1/__refstring
OLD_FILES+=usr/include/c++/v1/tr1/__undef_min_max
# 20180607: remove nls support from grep
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/grep.cat
OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/zh_CN.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/grep.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/grep.cat
# 20180517: retire vxge
OLD_FILES+=usr/share/man/man4/if_vxge.4.gz
OLD_FILES+=usr/share/man/man4/vxge.4.gz
# 20180512: Rename Unbound tools
OLD_FILES+=usr/sbin/unbound
OLD_FILES+=usr/sbin/unbound-anchor
OLD_FILES+=usr/sbin/unbound-checkconf
OLD_FILES+=usr/sbin/unbound-control
OLD_FILES+=usr/share/man/man5/unbound.conf.5.gz
OLD_FILES+=usr/share/man/man8/unbound-anchor.8.gz
OLD_FILES+=usr/share/man/man8/unbound-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/unbound-control.8.gz
OLD_FILES+=usr/share/man/man8/unbound.8.gz
# 20180508: retire nxge
OLD_FILES+=usr/share/man/man4/if_nxge.4.gz
OLD_FILES+=usr/share/man/man4/nxge.4.gz
# 20180505: rhosts
OLD_FILES+=usr/share/skel/dot.rhosts
# 20180502: retire ixgb
OLD_FILES+=usr/share/man/man4/if_ixgb.4.gz
OLD_FILES+=usr/share/man/man4/ixgb.4.gz
# 20180501: retire lmc
OLD_FILES+=usr/include/dev/lmc/if_lmc.h
OLD_DIRS+=usr/include/dev/lmc
OLD_FILES+=usr/sbin/lmcconfig
OLD_FILES+=usr/share/man/man4/lmc.4.gz
OLD_FILES+=usr/share/man/man4/if_lmc.4.gz
OLD_FILES+=usr/share/man/man8/lmcconfig.8.gz
# 20180417: remove fuswintr and suswintr
OLD_FILES+=usr/share/man/man9/fuswintr.9.gz
OLD_FILES+=usr/share/man/man9/suswintr.9.gz
# 20180413: remove Arcnet support
OLD_FILES+=usr/include/net/if_arc.h
OLD_FILES+=usr/share/man/man4/cm.4.gz
# 20180409: remove FDDI support
OLD_FILES+=usr/include/net/fddi.h
OLD_FILES+=usr/share/man/man4/fpa.4.gz
# 20180319: remove /boot/overlays, replaced by /boot/dtb/overlays
OLD_DIRS+=boot/overlays
# 20180311: remove sys/i386/include/pcaudioio.h
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/pcaudioio.h
.endif
# 20180310: remove sys/sys/dataacq.h
OLD_FILES+=usr/include/sys/dataacq.h
# 20180306: remove DTrace scripts made obsolete by dwatch(1)
OLD_FILES+=usr/share/dtrace/watch_execve
OLD_FILES+=usr/share/dtrace/watch_kill
OLD_FILES+=usr/share/dtrace/watch_vop_remove
# 20180212: move devmatch
OLD_FILES+=usr/sbin/devmatch
# 20180211: remove usb.conf
OLD_FILES+=etc/devd/usb.conf
# 20180208: remove c_rehash(1)
OLD_FILES+=usr/share/openssl/man/man1/c_rehash.1.gz
# 20180206: remove gdbtui
OLD_FILES+=usr/bin/gdbtui
# 20180201: Obsolete forth files
OLD_FILES+=boot/pcibios.4th
# 20180114: new clang import which bumps version from 5.0.1 to 6.0.0.
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/5.0.1/include/sanitizer
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/5.0.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/5.0.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/altivec.h
OLD_FILES+=usr/lib/clang/5.0.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/5.0.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/5.0.1/include/armintr.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/5.0.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/5.0.1/include/msa.h
OLD_FILES+=usr/lib/clang/5.0.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/5.0.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/5.0.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/5.0.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/5.0.1/include
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/5.0.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/5.0.1/lib
OLD_DIRS+=usr/lib/clang/5.0.1
# 20180109: Remove vestiges of digi(4) driver
OLD_FILES+=usr/include/sys/digiio.h
OLD_FILES+=usr/sbin/digictl
OLD_FILES+=usr/share/man/man8/digictl.8.gz
# 20180107: Convert remaining geli(8) tests to ATF
OLD_FILES+=tests/sys/geom/class/eli/nokey_test.sh
OLD_FILES+=tests/sys/geom/class/eli/readonly_test.sh
# 20180106: Convert most geli(8) tests to ATF
OLD_FILES+=tests/sys/geom/class/eli/attach_d_test.sh
OLD_FILES+=tests/sys/geom/class/eli/configure_b_B_test.sh
OLD_FILES+=tests/sys/geom/class/eli/detach_l_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_B_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_J_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_a_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_alias_test.sh
OLD_FILES+=tests/sys/geom/class/eli/init_i_P_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_copy_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_data_test.sh
OLD_FILES+=tests/sys/geom/class/eli/integrity_hmac_test.sh
OLD_FILES+=tests/sys/geom/class/eli/onetime_a_test.sh
OLD_FILES+=tests/sys/geom/class/eli/onetime_d_test.sh
# 20171230: Remove /etc/skel from mtree
OLD_DIRS+=etc/skel
# 20171208: Remove basename_r(3)
OLD_FILES+=usr/share/man/man3/basename_r.3.gz
# 20171204: Move fdformat man page from section 1 to section 8.
OLD_FILES+=usr/share/man/man1/fdformat.1.gz
# 20171203: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.4
OLD_LIBS+=usr/lib32/libproc.so.4
# 20171203: new clang import which bumps version from 5.0.0 to 5.0.1.
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface.h
OLD_FILES+=usr/lib/clang/5.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/5.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/5.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/5.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/5.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/5.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/5.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avx512vpopcntdqintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/clzerointrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/5.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/lwpintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/5.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/5.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/5.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/5.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/5.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/5.0.0/include
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-armhf.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/5.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/5.0.0/lib
OLD_DIRS+=usr/lib/clang/5.0.0
# 20171118: Remove old etc casper files
OLD_FILES+=etc/casper/system.dns
OLD_FILES+=etc/casper/system.grp
OLD_FILES+=etc/casper/system.pwd
OLD_FILES+=etc/casper/system.random
OLD_FILES+=etc/casper/system.sysctl
OLD_DIRS+=etc/casper
# 20171116: lint(1) removal
OLD_FILES+=usr/bin/lint
OLD_FILES+=usr/libexec/lint1
OLD_FILES+=usr/libexec/lint2
OLD_FILES+=usr/libdata/lint/llib-lposix.ln
OLD_FILES+=usr/libdata/lint/llib-lstdc.ln
OLD_FILES+=usr/share/man/man1/lint.1.gz
OLD_FILES+=usr/share/man/man7/lint.7.gz
OLD_DIRS+=usr/libdata/lint
# 20171114: Removal of all fortune datfiles other than freebsd-tips
OLD_FILES+=usr/share/games/fortune/fortunes
OLD_FILES+=usr/share/games/fortune/fortunes.dat
OLD_FILES+=usr/share/games/fortune/gerrold.limerick
OLD_FILES+=usr/share/games/fortune/gerrold.limerick.dat
OLD_FILES+=usr/share/games/fortune/limerick
OLD_FILES+=usr/share/games/fortune/limerick.dat
OLD_FILES+=usr/share/games/fortune/murphy
OLD_FILES+=usr/share/games/fortune/murphy-o
OLD_FILES+=usr/share/games/fortune/murphy-o.dat
OLD_FILES+=usr/share/games/fortune/murphy.dat
OLD_FILES+=usr/share/games/fortune/startrek
OLD_FILES+=usr/share/games/fortune/startrek.dat
OLD_FILES+=usr/share/games/fortune/zippy
OLD_FILES+=usr/share/games/fortune/zippy.dat
# 20171112: Removal of eqnchar definition
OLD_FILES+=usr/share/misc/eqnchar
# 20171110: Removal of mailaddr man page
OLD_FILES+=usr/share/man/man7/mailaddr.7.gz
# 20171108: badsect(8) removal
OLD_FILES+=sbin/badsect
OLD_FILES+=rescue/badsect
OLD_FILES+=usr/share/man/man8/badsect.8.gz
# 20171105: fixing lib/libclang_rt CRTARCH for arm:armv[67].
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \
(!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "")
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_LIBS+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.so
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan_cxx-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.safestack-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
.endif
# 20171104: libcap_random should be in /lib not in /usr/lib
OLD_LIBS+=usr/lib/libcap_random.so.0
# 20171104: Casper can work only as a shared library
OLD_FILES+=usr/lib/libcap_dns.a
OLD_FILES+=usr/lib/libcap_dns_p.a
OLD_FILES+=usr/lib/libcap_grp.a
OLD_FILES+=usr/lib/libcap_grp_p.a
OLD_FILES+=usr/lib/libcap_pwd.a
OLD_FILES+=usr/lib/libcap_pwd_p.a
OLD_FILES+=usr/lib/libcap_random.a
OLD_FILES+=usr/lib/libcap_random_p.a
OLD_FILES+=usr/lib/libcap_sysctl.a
OLD_FILES+=usr/lib/libcap_sysctl_p.a
OLD_FILES+=usr/lib/libcasper.a
OLD_FILES+=usr/lib/libcasper_p.a
OLD_FILES+=usr/lib32/libcap_dns.a
OLD_FILES+=usr/lib32/libcap_dns_p.a
OLD_FILES+=usr/lib32/libcap_grp.a
OLD_FILES+=usr/lib32/libcap_grp_p.a
OLD_FILES+=usr/lib32/libcap_pwd.a
OLD_FILES+=usr/lib32/libcap_pwd_p.a
OLD_FILES+=usr/lib32/libcap_random.a
OLD_FILES+=usr/lib32/libcap_random_p.a
OLD_FILES+=usr/lib32/libcap_sysctl.a
OLD_FILES+=usr/lib32/libcap_sysctl_p.a
OLD_FILES+=usr/lib32/libcasper.a
OLD_FILES+=usr/lib32/libcasper_p.a
# 20171031: Removal of adding_user man page
OLD_FILES+=usr/share/man/man7/adding_user.7.gz
# 20171031: Disconnected libpathconv tests
OLD_DIRS+=usr/tests/lib/libpathconv
# 20171017: Removal of mbpool(9)
OLD_FILES+=usr/include/sys/mbpool.h
OLD_FILES+=usr/share/man/man9/mbpool.9.gz
OLD_FILES+=usr/share/man/man9/mbp_destroy.9.gz
OLD_FILES+=usr/share/man/man9/mbp_alloc.9.gz
OLD_FILES+=usr/share/man/man9/mbp_ext_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_count.9.gz
OLD_FILES+=usr/share/man/man9/mbp_card_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_get_keep.9.gz
OLD_FILES+=usr/share/man/man9/mbp_free.9.gz
OLD_FILES+=usr/share/man/man9/mbp_get.9.gz
OLD_FILES+=usr/share/man/man9/mbp_create.9.gz
OLD_FILES+=usr/share/man/man9/mbp_sync.9.gz
# 20171010: Remove libstand
OLD_FILES+=usr/lib/libstand.a
OLD_FILES+=usr/lib/libstand_p.a
OLD_FILES+=usr/lib32/libstand.a
OLD_FILES+=usr/lib32/libstand_p.a
OLD_FILES+=usr/include/stand.h
OLD_FILES+=usr/share/man/man3/libstand.3.gz
# 20171003: remove RCMDS
OLD_FILES+=bin/rcp
OLD_FILES+=rescue/rcp
OLD_FILES+=usr/bin/rlogin
OLD_FILES+=usr/bin/rsh
OLD_FILES+=usr/libexec/rlogind
OLD_FILES+=usr/libexec/rshd
OLD_FILES+=usr/share/man/man1/rcp.1.gz
OLD_FILES+=usr/share/man/man1/rlogin.1.gz
OLD_FILES+=usr/share/man/man1/rsh.1.gz
OLD_FILES+=usr/share/man/man8/rlogind.8.gz
OLD_FILES+=usr/share/man/man8/rshd.8.gz
# 20170927: crshared
OLD_FILES+=usr/share/man/man9/crshared.9.gz
# 20170927: procctl
OLD_FILES+=usr/share/man/man8/procctl.8.gz
OLD_FILES+=usr/sbin/procctl
# 20170926: remove unneeded man aliases and locales directory
OLD_FILES+=usr/share/man/en.ISO8859-1/man1
OLD_FILES+=usr/share/man/en.ISO8859-1/man2
OLD_FILES+=usr/share/man/en.ISO8859-1/man3
OLD_FILES+=usr/share/man/en.ISO8859-1/man4
OLD_FILES+=usr/share/man/en.ISO8859-1/man5
OLD_FILES+=usr/share/man/en.ISO8859-1/man6
OLD_FILES+=usr/share/man/en.ISO8859-1/man7
OLD_FILES+=usr/share/man/en.ISO8859-1/man8
OLD_FILES+=usr/share/man/en.ISO8859-1/man9
OLD_FILES+=usr/share/man/en.ISO8859-1/mandoc.db
OLD_DIRS+=usr/share/man/en.ISO8859-1
OLD_FILES+=usr/share/man/en.UTF-8/man1
OLD_FILES+=usr/share/man/en.UTF-8/man2
OLD_FILES+=usr/share/man/en.UTF-8/man3
OLD_FILES+=usr/share/man/en.UTF-8/man4
OLD_FILES+=usr/share/man/en.UTF-8/man5
OLD_FILES+=usr/share/man/en.UTF-8/man6
OLD_FILES+=usr/share/man/en.UTF-8/man7
OLD_FILES+=usr/share/man/en.UTF-8/man8
OLD_FILES+=usr/share/man/en.UTF-8/man9
OLD_FILES+=usr/share/man/en.UTF-8/mandoc.db
OLD_DIRS+=usr/share/man/en.UTF-8
OLD_FILES+=usr/share/man/en.ISO8859-15
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man1
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/man3
OLD_FILES+=usr/share/openssl/man/en.ISO8859-1/mandoc.db
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1
OLD_FILES+=usr/share/openssl/man/en.ISO8859-15
OLD_DIRS+=usr/share/man/ja/man1
OLD_DIRS+=usr/share/man/ja/man2
OLD_DIRS+=usr/share/man/ja/man3
OLD_DIRS+=usr/share/man/ja/man4
OLD_DIRS+=usr/share/man/ja/man5
OLD_DIRS+=usr/share/man/ja/man6
OLD_DIRS+=usr/share/man/ja/man7
OLD_DIRS+=usr/share/man/ja/man8
OLD_DIRS+=usr/share/man/ja/man9
OLD_DIRS+=usr/share/man/ja
# 20170913: remove unneeded catman utility
OLD_FILES+=etc/periodic/weekly/330.catman
OLD_FILES+=usr/bin/catman
OLD_FILES+=usr/libexec/catman.local
OLD_FILES+=usr/share/man/man1/catman.1.gz
OLD_FILES+=usr/share/man/man8/catman.local.8.gz
OLD_DIRS+=usr/share/man/cat1
OLD_DIRS+=usr/share/man/cat2
OLD_DIRS+=usr/share/man/cat3
OLD_DIRS+=usr/share/man/cat4/amd64
OLD_DIRS+=usr/share/man/cat4/arm
OLD_DIRS+=usr/share/man/cat4/i386
OLD_DIRS+=usr/share/man/cat4/powerpc
OLD_DIRS+=usr/share/man/cat4/sparc64
OLD_DIRS+=usr/share/man/cat4
OLD_DIRS+=usr/share/man/cat5
OLD_DIRS+=usr/share/man/cat6
OLD_DIRS+=usr/share/man/cat7
OLD_DIRS+=usr/share/man/cat8/amd64
OLD_DIRS+=usr/share/man/cat8/arm
OLD_DIRS+=usr/share/man/cat8/i386
OLD_DIRS+=usr/share/man/cat8/powerpc
OLD_DIRS+=usr/share/man/cat8/sparc64
OLD_DIRS+=usr/share/man/cat8
OLD_DIRS+=usr/share/man/cat9
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat2
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat3
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/amd64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/arm
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/i386
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/powerpc
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4/sparc64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat4
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat5
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat6
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat7
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/amd64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/arm
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/i386
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/powerpc
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8/sparc64
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat8
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat9
OLD_DIRS+=usr/share/man/en.UTF-8/cat1
OLD_DIRS+=usr/share/man/en.UTF-8/cat2
OLD_DIRS+=usr/share/man/en.UTF-8/cat3
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/amd64
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/arm
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/i386
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/powerpc
OLD_DIRS+=usr/share/man/en.UTF-8/cat4/sparc64
OLD_DIRS+=usr/share/man/en.UTF-8/cat4
OLD_DIRS+=usr/share/man/en.UTF-8/cat5
OLD_DIRS+=usr/share/man/en.UTF-8/cat6
OLD_DIRS+=usr/share/man/en.UTF-8/cat7
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/amd64
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/arm
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/i386
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/powerpc
OLD_DIRS+=usr/share/man/en.UTF-8/cat8/sparc64
OLD_DIRS+=usr/share/man/en.UTF-8/cat8
OLD_DIRS+=usr/share/man/en.UTF-8/cat9
OLD_DIRS+=usr/share/man/ja/cat1
OLD_DIRS+=usr/share/man/ja/cat2
OLD_DIRS+=usr/share/man/ja/cat3
OLD_DIRS+=usr/share/man/ja/cat4/amd64
OLD_DIRS+=usr/share/man/ja/cat4/arm
OLD_DIRS+=usr/share/man/ja/cat4/i386
OLD_DIRS+=usr/share/man/ja/cat4/powerpc
OLD_DIRS+=usr/share/man/ja/cat4/sparc64
OLD_DIRS+=usr/share/man/ja/cat4
OLD_DIRS+=usr/share/man/ja/cat5
OLD_DIRS+=usr/share/man/ja/cat6
OLD_DIRS+=usr/share/man/ja/cat7
OLD_DIRS+=usr/share/man/ja/cat8/amd64
OLD_DIRS+=usr/share/man/ja/cat8/arm
OLD_DIRS+=usr/share/man/ja/cat8/i386
OLD_DIRS+=usr/share/man/ja/cat8/powerpc
OLD_DIRS+=usr/share/man/ja/cat8/sparc64
OLD_DIRS+=usr/share/man/ja/cat8
OLD_DIRS+=usr/share/man/ja/cat9
OLD_DIRS+=usr/share/openssl/man/cat1
OLD_DIRS+=usr/share/openssl/man/cat3
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat1
OLD_DIRS+=usr/share/openssl/man/en.ISO8859-1/cat3
# 20170802: ksyms(4) ioctl interface was removed
OLD_FILES+=usr/include/sys/ksyms.h
# 20170722: new clang import which bumps version from 4.0.0 to 5.0.0.
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/4.0.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/4.0.0/include/sanitizer
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_complex_builtins.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/4.0.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/4.0.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/altivec.h
OLD_FILES+=usr/lib/clang/4.0.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/4.0.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/4.0.0/include/armintr.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/4.0.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/4.0.0/include/msa.h
OLD_FILES+=usr/lib/clang/4.0.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/4.0.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/4.0.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/4.0.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/4.0.0/include
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/4.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/4.0.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/4.0.0/lib
OLD_DIRS+=usr/lib/clang/4.0.0
OLD_FILES+=usr/bin/llvm-pdbdump
# 20170610: chown-f_test replaced by chown_test
OLD_FILES+=usr/tests/usr.sbin/chown/chown-f_test
# 20170609: drop obsolete manpage link (if_rtwn.4 -> rtwn.4)
OLD_FILES+=usr/share/man/man4/if_rtwn.4.gz
# 20170531: removal of groff
OLD_FILES+=usr/bin/addftinfo
OLD_FILES+=usr/bin/afmtodit
OLD_FILES+=usr/bin/checknr
OLD_FILES+=usr/bin/colcrt
OLD_FILES+=usr/bin/eqn
OLD_FILES+=usr/bin/grn
OLD_FILES+=usr/bin/grodvi
OLD_FILES+=usr/bin/groff
OLD_FILES+=usr/bin/grog
OLD_FILES+=usr/bin/grolbp
OLD_FILES+=usr/bin/grolj4
OLD_FILES+=usr/bin/grops
OLD_FILES+=usr/bin/grotty
OLD_FILES+=usr/bin/hpftodit
OLD_FILES+=usr/bin/indxbib
OLD_FILES+=usr/bin/lkbib
OLD_FILES+=usr/bin/lookbib
OLD_FILES+=usr/bin/mmroff
OLD_FILES+=usr/bin/neqn
OLD_FILES+=usr/bin/nroff
OLD_FILES+=usr/bin/pfbtops
OLD_FILES+=usr/bin/pic
OLD_FILES+=usr/bin/post-grohtml
OLD_FILES+=usr/bin/pre-grohtml
OLD_FILES+=usr/bin/psroff
OLD_FILES+=usr/bin/refer
OLD_FILES+=usr/bin/tbl
OLD_FILES+=usr/bin/tfmtodit
OLD_FILES+=usr/bin/troff
OLD_FILES+=usr/bin/vgrind
OLD_FILES+=usr/libexec/vfontedpr
OLD_FILES+=usr/share/dict/eign
OLD_FILES+=usr/share/groff_font/devX100-12/CB
OLD_FILES+=usr/share/groff_font/devX100-12/CBI
OLD_FILES+=usr/share/groff_font/devX100-12/CI
OLD_FILES+=usr/share/groff_font/devX100-12/CR
OLD_FILES+=usr/share/groff_font/devX100-12/DESC
OLD_FILES+=usr/share/groff_font/devX100-12/HB
OLD_FILES+=usr/share/groff_font/devX100-12/HBI
OLD_FILES+=usr/share/groff_font/devX100-12/HI
OLD_FILES+=usr/share/groff_font/devX100-12/HR
OLD_FILES+=usr/share/groff_font/devX100-12/NB
OLD_FILES+=usr/share/groff_font/devX100-12/NBI
OLD_FILES+=usr/share/groff_font/devX100-12/NI
OLD_FILES+=usr/share/groff_font/devX100-12/NR
OLD_FILES+=usr/share/groff_font/devX100-12/S
OLD_FILES+=usr/share/groff_font/devX100-12/TB
OLD_FILES+=usr/share/groff_font/devX100-12/TBI
OLD_FILES+=usr/share/groff_font/devX100-12/TI
OLD_FILES+=usr/share/groff_font/devX100-12/TR
OLD_DIRS+=usr/share/groff_font/devX100-12
OLD_FILES+=usr/share/groff_font/devX100/CB
OLD_FILES+=usr/share/groff_font/devX100/CBI
OLD_FILES+=usr/share/groff_font/devX100/CI
OLD_FILES+=usr/share/groff_font/devX100/CR
OLD_FILES+=usr/share/groff_font/devX100/DESC
OLD_FILES+=usr/share/groff_font/devX100/HB
OLD_FILES+=usr/share/groff_font/devX100/HBI
OLD_FILES+=usr/share/groff_font/devX100/HI
OLD_FILES+=usr/share/groff_font/devX100/HR
OLD_FILES+=usr/share/groff_font/devX100/NB
OLD_FILES+=usr/share/groff_font/devX100/NBI
OLD_FILES+=usr/share/groff_font/devX100/NI
OLD_FILES+=usr/share/groff_font/devX100/NR
OLD_FILES+=usr/share/groff_font/devX100/S
OLD_FILES+=usr/share/groff_font/devX100/TB
OLD_FILES+=usr/share/groff_font/devX100/TBI
OLD_FILES+=usr/share/groff_font/devX100/TI
OLD_FILES+=usr/share/groff_font/devX100/TR
OLD_DIRS+=usr/share/groff_font/devX100
OLD_FILES+=usr/share/groff_font/devX75-12/CB
OLD_FILES+=usr/share/groff_font/devX75-12/CBI
OLD_FILES+=usr/share/groff_font/devX75-12/CI
OLD_FILES+=usr/share/groff_font/devX75-12/CR
OLD_FILES+=usr/share/groff_font/devX75-12/DESC
OLD_FILES+=usr/share/groff_font/devX75-12/HB
OLD_FILES+=usr/share/groff_font/devX75-12/HBI
OLD_FILES+=usr/share/groff_font/devX75-12/HI
OLD_FILES+=usr/share/groff_font/devX75-12/HR
OLD_FILES+=usr/share/groff_font/devX75-12/NB
OLD_FILES+=usr/share/groff_font/devX75-12/NBI
OLD_FILES+=usr/share/groff_font/devX75-12/NI
OLD_FILES+=usr/share/groff_font/devX75-12/NR
OLD_FILES+=usr/share/groff_font/devX75-12/S
OLD_FILES+=usr/share/groff_font/devX75-12/TB
OLD_FILES+=usr/share/groff_font/devX75-12/TBI
OLD_FILES+=usr/share/groff_font/devX75-12/TI
OLD_FILES+=usr/share/groff_font/devX75-12/TR
OLD_DIRS+=usr/share/groff_font/devX75-12
OLD_FILES+=usr/share/groff_font/devX75/CB
OLD_FILES+=usr/share/groff_font/devX75/CBI
OLD_FILES+=usr/share/groff_font/devX75/CI
OLD_FILES+=usr/share/groff_font/devX75/CR
OLD_FILES+=usr/share/groff_font/devX75/DESC
OLD_FILES+=usr/share/groff_font/devX75/HB
OLD_FILES+=usr/share/groff_font/devX75/HBI
OLD_FILES+=usr/share/groff_font/devX75/HI
OLD_FILES+=usr/share/groff_font/devX75/HR
OLD_FILES+=usr/share/groff_font/devX75/NB
OLD_FILES+=usr/share/groff_font/devX75/NBI
OLD_FILES+=usr/share/groff_font/devX75/NI
OLD_FILES+=usr/share/groff_font/devX75/NR
OLD_FILES+=usr/share/groff_font/devX75/S
OLD_FILES+=usr/share/groff_font/devX75/TB
OLD_FILES+=usr/share/groff_font/devX75/TBI
OLD_FILES+=usr/share/groff_font/devX75/TI
OLD_FILES+=usr/share/groff_font/devX75/TR
OLD_DIRS+=usr/share/groff_font/devX75
OLD_FILES+=usr/share/groff_font/devascii/B
OLD_FILES+=usr/share/groff_font/devascii/BI
OLD_FILES+=usr/share/groff_font/devascii/CW
OLD_FILES+=usr/share/groff_font/devascii/DESC
OLD_FILES+=usr/share/groff_font/devascii/I
OLD_FILES+=usr/share/groff_font/devascii/L
OLD_FILES+=usr/share/groff_font/devascii/R
OLD_FILES+=usr/share/groff_font/devascii/S
OLD_DIRS+=usr/share/groff_font/devascii
OLD_FILES+=usr/share/groff_font/devcp1047/B
OLD_FILES+=usr/share/groff_font/devcp1047/BI
OLD_FILES+=usr/share/groff_font/devcp1047/CW
OLD_FILES+=usr/share/groff_font/devcp1047/DESC
OLD_FILES+=usr/share/groff_font/devcp1047/I
OLD_FILES+=usr/share/groff_font/devcp1047/L
OLD_FILES+=usr/share/groff_font/devcp1047/R
OLD_FILES+=usr/share/groff_font/devcp1047/S
OLD_DIRS+=usr/share/groff_font/devcp1047
OLD_FILES+=usr/share/groff_font/devdvi/CW
OLD_FILES+=usr/share/groff_font/devdvi/CWEC
OLD_FILES+=usr/share/groff_font/devdvi/CWI
OLD_FILES+=usr/share/groff_font/devdvi/CWIEC
OLD_FILES+=usr/share/groff_font/devdvi/CWITC
OLD_FILES+=usr/share/groff_font/devdvi/CWTC
OLD_FILES+=usr/share/groff_font/devdvi/CompileFonts
OLD_FILES+=usr/share/groff_font/devdvi/DESC
OLD_FILES+=usr/share/groff_font/devdvi/EX
OLD_FILES+=usr/share/groff_font/devdvi/HB
OLD_FILES+=usr/share/groff_font/devdvi/HBEC
OLD_FILES+=usr/share/groff_font/devdvi/HBI
OLD_FILES+=usr/share/groff_font/devdvi/HBIEC
OLD_FILES+=usr/share/groff_font/devdvi/HBITC
OLD_FILES+=usr/share/groff_font/devdvi/HBTC
OLD_FILES+=usr/share/groff_font/devdvi/HI
OLD_FILES+=usr/share/groff_font/devdvi/HIEC
OLD_FILES+=usr/share/groff_font/devdvi/HITC
OLD_FILES+=usr/share/groff_font/devdvi/HR
OLD_FILES+=usr/share/groff_font/devdvi/HREC
OLD_FILES+=usr/share/groff_font/devdvi/HRTC
OLD_FILES+=usr/share/groff_font/devdvi/MI
OLD_FILES+=usr/share/groff_font/devdvi/Makefile
OLD_FILES+=usr/share/groff_font/devdvi/S
OLD_FILES+=usr/share/groff_font/devdvi/SA
OLD_FILES+=usr/share/groff_font/devdvi/SB
OLD_FILES+=usr/share/groff_font/devdvi/SC
OLD_FILES+=usr/share/groff_font/devdvi/TB
OLD_FILES+=usr/share/groff_font/devdvi/TBEC
OLD_FILES+=usr/share/groff_font/devdvi/TBI
OLD_FILES+=usr/share/groff_font/devdvi/TBIEC
OLD_FILES+=usr/share/groff_font/devdvi/TBITC
OLD_FILES+=usr/share/groff_font/devdvi/TBTC
OLD_FILES+=usr/share/groff_font/devdvi/TI
OLD_FILES+=usr/share/groff_font/devdvi/TIEC
OLD_FILES+=usr/share/groff_font/devdvi/TITC
OLD_FILES+=usr/share/groff_font/devdvi/TR
OLD_FILES+=usr/share/groff_font/devdvi/TREC
OLD_FILES+=usr/share/groff_font/devdvi/TRTC
OLD_FILES+=usr/share/groff_font/devdvi/ec.map
OLD_FILES+=usr/share/groff_font/devdvi/msam.map
OLD_FILES+=usr/share/groff_font/devdvi/msbm.map
OLD_FILES+=usr/share/groff_font/devdvi/tc.map
OLD_FILES+=usr/share/groff_font/devdvi/texb.map
OLD_FILES+=usr/share/groff_font/devdvi/texex.map
OLD_FILES+=usr/share/groff_font/devdvi/texi.map
OLD_FILES+=usr/share/groff_font/devdvi/texmi.map
OLD_FILES+=usr/share/groff_font/devdvi/texr.map
OLD_FILES+=usr/share/groff_font/devdvi/texsy.map
OLD_FILES+=usr/share/groff_font/devdvi/textex.map
OLD_FILES+=usr/share/groff_font/devdvi/textt.map
OLD_DIRS+=usr/share/groff_font/devdvi
OLD_FILES+=usr/share/groff_font/devhtml/B
OLD_FILES+=usr/share/groff_font/devhtml/BI
OLD_FILES+=usr/share/groff_font/devhtml/CB
OLD_FILES+=usr/share/groff_font/devhtml/CBI
OLD_FILES+=usr/share/groff_font/devhtml/CI
OLD_FILES+=usr/share/groff_font/devhtml/CR
OLD_FILES+=usr/share/groff_font/devhtml/DESC
OLD_FILES+=usr/share/groff_font/devhtml/I
OLD_FILES+=usr/share/groff_font/devhtml/R
OLD_FILES+=usr/share/groff_font/devhtml/S
OLD_DIRS+=usr/share/groff_font/devhtml
OLD_FILES+=usr/share/groff_font/devkoi8-r/B
OLD_FILES+=usr/share/groff_font/devkoi8-r/BI
OLD_FILES+=usr/share/groff_font/devkoi8-r/CW
OLD_FILES+=usr/share/groff_font/devkoi8-r/DESC
OLD_FILES+=usr/share/groff_font/devkoi8-r/I
OLD_FILES+=usr/share/groff_font/devkoi8-r/L
OLD_FILES+=usr/share/groff_font/devkoi8-r/R
OLD_FILES+=usr/share/groff_font/devkoi8-r/S
OLD_DIRS+=usr/share/groff_font/devkoi8-r
OLD_FILES+=usr/share/groff_font/devlatin1/B
OLD_FILES+=usr/share/groff_font/devlatin1/BI
OLD_FILES+=usr/share/groff_font/devlatin1/CW
OLD_FILES+=usr/share/groff_font/devlatin1/DESC
OLD_FILES+=usr/share/groff_font/devlatin1/I
OLD_FILES+=usr/share/groff_font/devlatin1/L
OLD_FILES+=usr/share/groff_font/devlatin1/R
OLD_FILES+=usr/share/groff_font/devlatin1/S
OLD_DIRS+=usr/share/groff_font/devlatin1
OLD_FILES+=usr/share/groff_font/devlbp/CB
OLD_FILES+=usr/share/groff_font/devlbp/CI
OLD_FILES+=usr/share/groff_font/devlbp/CR
OLD_FILES+=usr/share/groff_font/devlbp/DESC
OLD_FILES+=usr/share/groff_font/devlbp/EB
OLD_FILES+=usr/share/groff_font/devlbp/EI
OLD_FILES+=usr/share/groff_font/devlbp/ER
OLD_FILES+=usr/share/groff_font/devlbp/HB
OLD_FILES+=usr/share/groff_font/devlbp/HBI
OLD_FILES+=usr/share/groff_font/devlbp/HI
OLD_FILES+=usr/share/groff_font/devlbp/HNB
OLD_FILES+=usr/share/groff_font/devlbp/HNBI
OLD_FILES+=usr/share/groff_font/devlbp/HNI
OLD_FILES+=usr/share/groff_font/devlbp/HNR
OLD_FILES+=usr/share/groff_font/devlbp/HR
OLD_FILES+=usr/share/groff_font/devlbp/TB
OLD_FILES+=usr/share/groff_font/devlbp/TBI
OLD_FILES+=usr/share/groff_font/devlbp/TI
OLD_FILES+=usr/share/groff_font/devlbp/TR
OLD_DIRS+=usr/share/groff_font/devlbp
OLD_FILES+=usr/share/groff_font/devlj4/AB
OLD_FILES+=usr/share/groff_font/devlj4/ABI
OLD_FILES+=usr/share/groff_font/devlj4/AI
OLD_FILES+=usr/share/groff_font/devlj4/ALBB
OLD_FILES+=usr/share/groff_font/devlj4/ALBR
OLD_FILES+=usr/share/groff_font/devlj4/AOB
OLD_FILES+=usr/share/groff_font/devlj4/AOI
OLD_FILES+=usr/share/groff_font/devlj4/AOR
OLD_FILES+=usr/share/groff_font/devlj4/AR
OLD_FILES+=usr/share/groff_font/devlj4/CB
OLD_FILES+=usr/share/groff_font/devlj4/CBI
OLD_FILES+=usr/share/groff_font/devlj4/CI
OLD_FILES+=usr/share/groff_font/devlj4/CLARENDON
OLD_FILES+=usr/share/groff_font/devlj4/CORONET
OLD_FILES+=usr/share/groff_font/devlj4/CR
OLD_FILES+=usr/share/groff_font/devlj4/DESC
OLD_FILES+=usr/share/groff_font/devlj4/GB
OLD_FILES+=usr/share/groff_font/devlj4/GBI
OLD_FILES+=usr/share/groff_font/devlj4/GI
OLD_FILES+=usr/share/groff_font/devlj4/GR
OLD_FILES+=usr/share/groff_font/devlj4/LGB
OLD_FILES+=usr/share/groff_font/devlj4/LGI
OLD_FILES+=usr/share/groff_font/devlj4/LGR
OLD_FILES+=usr/share/groff_font/devlj4/MARIGOLD
OLD_FILES+=usr/share/groff_font/devlj4/OB
OLD_FILES+=usr/share/groff_font/devlj4/OBI
OLD_FILES+=usr/share/groff_font/devlj4/OI
OLD_FILES+=usr/share/groff_font/devlj4/OR
OLD_FILES+=usr/share/groff_font/devlj4/S
OLD_FILES+=usr/share/groff_font/devlj4/SYMBOL
OLD_FILES+=usr/share/groff_font/devlj4/TB
OLD_FILES+=usr/share/groff_font/devlj4/TBI
OLD_FILES+=usr/share/groff_font/devlj4/TI
OLD_FILES+=usr/share/groff_font/devlj4/TNRB
OLD_FILES+=usr/share/groff_font/devlj4/TNRBI
OLD_FILES+=usr/share/groff_font/devlj4/TNRI
OLD_FILES+=usr/share/groff_font/devlj4/TNRR
OLD_FILES+=usr/share/groff_font/devlj4/TR
OLD_FILES+=usr/share/groff_font/devlj4/UB
OLD_FILES+=usr/share/groff_font/devlj4/UBI
OLD_FILES+=usr/share/groff_font/devlj4/UCB
OLD_FILES+=usr/share/groff_font/devlj4/UCBI
OLD_FILES+=usr/share/groff_font/devlj4/UCI
OLD_FILES+=usr/share/groff_font/devlj4/UCR
OLD_FILES+=usr/share/groff_font/devlj4/UI
OLD_FILES+=usr/share/groff_font/devlj4/UR
OLD_FILES+=usr/share/groff_font/devlj4/WINGDINGS
OLD_DIRS+=usr/share/groff_font/devlj4
OLD_FILES+=usr/share/groff_font/devps/AB
OLD_FILES+=usr/share/groff_font/devps/ABI
OLD_FILES+=usr/share/groff_font/devps/AI
OLD_FILES+=usr/share/groff_font/devps/AR
OLD_FILES+=usr/share/groff_font/devps/BMB
OLD_FILES+=usr/share/groff_font/devps/BMBI
OLD_FILES+=usr/share/groff_font/devps/BMI
OLD_FILES+=usr/share/groff_font/devps/BMR
OLD_FILES+=usr/share/groff_font/devps/CB
OLD_FILES+=usr/share/groff_font/devps/CBI
OLD_FILES+=usr/share/groff_font/devps/CI
OLD_FILES+=usr/share/groff_font/devps/CR
OLD_FILES+=usr/share/groff_font/devps/DESC
OLD_FILES+=usr/share/groff_font/devps/EURO
OLD_FILES+=usr/share/groff_font/devps/HB
OLD_FILES+=usr/share/groff_font/devps/HBI
OLD_FILES+=usr/share/groff_font/devps/HI
OLD_FILES+=usr/share/groff_font/devps/HNB
OLD_FILES+=usr/share/groff_font/devps/HNBI
OLD_FILES+=usr/share/groff_font/devps/HNI
OLD_FILES+=usr/share/groff_font/devps/HNR
OLD_FILES+=usr/share/groff_font/devps/HR
OLD_FILES+=usr/share/groff_font/devps/Makefile
OLD_FILES+=usr/share/groff_font/devps/NB
OLD_FILES+=usr/share/groff_font/devps/NBI
OLD_FILES+=usr/share/groff_font/devps/NI
OLD_FILES+=usr/share/groff_font/devps/NR
OLD_FILES+=usr/share/groff_font/devps/PB
OLD_FILES+=usr/share/groff_font/devps/PBI
OLD_FILES+=usr/share/groff_font/devps/PI
OLD_FILES+=usr/share/groff_font/devps/PR
OLD_FILES+=usr/share/groff_font/devps/S
OLD_FILES+=usr/share/groff_font/devps/SS
OLD_FILES+=usr/share/groff_font/devps/TB
OLD_FILES+=usr/share/groff_font/devps/TBI
OLD_FILES+=usr/share/groff_font/devps/TI
OLD_FILES+=usr/share/groff_font/devps/TR
OLD_FILES+=usr/share/groff_font/devps/ZCMI
OLD_FILES+=usr/share/groff_font/devps/ZD
OLD_FILES+=usr/share/groff_font/devps/ZDR
OLD_FILES+=usr/share/groff_font/devps/afmname
OLD_FILES+=usr/share/groff_font/devps/dingbats.map
OLD_FILES+=usr/share/groff_font/devps/dingbats.rmap
OLD_FILES+=usr/share/groff_font/devps/download
OLD_FILES+=usr/share/groff_font/devps/freeeuro.pfa
OLD_FILES+=usr/share/groff_font/devps/lgreekmap
OLD_FILES+=usr/share/groff_font/devps/prologue
OLD_FILES+=usr/share/groff_font/devps/symbol.sed
OLD_FILES+=usr/share/groff_font/devps/symbolchars
OLD_FILES+=usr/share/groff_font/devps/symbolsl.afm
OLD_FILES+=usr/share/groff_font/devps/symbolsl.pfa
OLD_FILES+=usr/share/groff_font/devps/text.enc
OLD_FILES+=usr/share/groff_font/devps/textmap
OLD_FILES+=usr/share/groff_font/devps/zapfdr.pfa
OLD_DIRS+=usr/share/groff_font/devps
OLD_FILES+=usr/share/groff_font/devutf8/B
OLD_FILES+=usr/share/groff_font/devutf8/BI
OLD_FILES+=usr/share/groff_font/devutf8/CW
OLD_FILES+=usr/share/groff_font/devutf8/DESC
OLD_FILES+=usr/share/groff_font/devutf8/I
OLD_FILES+=usr/share/groff_font/devutf8/L
OLD_FILES+=usr/share/groff_font/devutf8/R
OLD_FILES+=usr/share/groff_font/devutf8/S
OLD_DIRS+=usr/share/groff_font/devutf8
OLD_DIRS+=usr/share/groff_font
OLD_FILES+=usr/share/man/man1/addftinfo.1.gz
OLD_FILES+=usr/share/man/man1/afmtodit.1.gz
OLD_FILES+=usr/share/man/man1/checknr.1.gz
OLD_FILES+=usr/share/man/man1/colcrt.1.gz
OLD_FILES+=usr/share/man/man1/eqn.1.gz
OLD_FILES+=usr/share/man/man1/grn.1.gz
OLD_FILES+=usr/share/man/man1/grodvi.1.gz
OLD_FILES+=usr/share/man/man1/groff.1.gz
OLD_FILES+=usr/share/man/man1/grog.1.gz
OLD_FILES+=usr/share/man/man1/grolbp.1.gz
OLD_FILES+=usr/share/man/man1/grolj4.1.gz
OLD_FILES+=usr/share/man/man1/grops.1.gz
OLD_FILES+=usr/share/man/man1/grotty.1.gz
OLD_FILES+=usr/share/man/man1/hpftodit.1.gz
OLD_FILES+=usr/share/man/man1/indxbib.1.gz
OLD_FILES+=usr/share/man/man1/lkbib.1.gz
OLD_FILES+=usr/share/man/man1/lookbib.1.gz
OLD_FILES+=usr/share/man/man1/mmroff.1.gz
OLD_FILES+=usr/share/man/man1/neqn.1.gz
OLD_FILES+=usr/share/man/man1/nroff.1.gz
OLD_FILES+=usr/share/man/man1/pfbtops.1.gz
OLD_FILES+=usr/share/man/man1/pic.1.gz
OLD_FILES+=usr/share/man/man1/psroff.1.gz
OLD_FILES+=usr/share/man/man1/refer.1.gz
OLD_FILES+=usr/share/man/man1/tbl.1.gz
OLD_FILES+=usr/share/man/man1/tfmtodit.1.gz
OLD_FILES+=usr/share/man/man1/troff.1.gz
OLD_FILES+=usr/share/man/man1/vgrind.1.gz
OLD_FILES+=usr/share/man/man5/groff_font.5.gz
OLD_FILES+=usr/share/man/man5/groff_out.5.gz
OLD_FILES+=usr/share/man/man5/groff_tmac.5.gz
OLD_FILES+=usr/share/man/man5/lj4_font.5.gz
OLD_FILES+=usr/share/man/man5/tmac.5.gz
OLD_FILES+=usr/share/man/man5/vgrindefs.5.gz
OLD_FILES+=usr/share/man/man7/ditroff.7.gz
OLD_FILES+=usr/share/man/man7/groff.7.gz
OLD_FILES+=usr/share/man/man7/groff_char.7.gz
OLD_FILES+=usr/share/man/man7/groff_diff.7.gz
OLD_FILES+=usr/share/man/man7/groff_man.7.gz
OLD_FILES+=usr/share/man/man7/groff_mdoc.7.gz
OLD_FILES+=usr/share/man/man7/groff_me.7.gz
OLD_FILES+=usr/share/man/man7/groff_mm.7.gz
OLD_FILES+=usr/share/man/man7/groff_mmse.7.gz
OLD_FILES+=usr/share/man/man7/groff_ms.7.gz
OLD_FILES+=usr/share/man/man7/groff_trace.7.gz
OLD_FILES+=usr/share/man/man7/groff_www.7.gz
OLD_FILES+=usr/share/man/man7/mdoc.samples.7.gz
OLD_FILES+=usr/share/man/man7/me.7.gz
OLD_FILES+=usr/share/man/man7/mm.7.gz
OLD_FILES+=usr/share/man/man7/mmse.7.gz
OLD_FILES+=usr/share/man/man7/ms.7.gz
OLD_FILES+=usr/share/man/man7/orig_me.7.gz
OLD_FILES+=usr/share/me/acm.me
OLD_FILES+=usr/share/me/chars.me
OLD_FILES+=usr/share/me/deltext.me
OLD_FILES+=usr/share/me/eqn.me
OLD_FILES+=usr/share/me/float.me
OLD_FILES+=usr/share/me/footnote.me
OLD_FILES+=usr/share/me/index.me
OLD_FILES+=usr/share/me/letterhead.me
OLD_FILES+=usr/share/me/local.me
OLD_FILES+=usr/share/me/null.me
OLD_FILES+=usr/share/me/refer.me
OLD_FILES+=usr/share/me/revisions
OLD_FILES+=usr/share/me/sh.me
OLD_FILES+=usr/share/me/tbl.me
OLD_FILES+=usr/share/me/thesis.me
OLD_DIRS+=usr/share/me
OLD_FILES+=usr/share/misc/vgrindefs
OLD_FILES+=usr/share/misc/vgrindefs.db
OLD_FILES+=usr/share/tmac/X.tmac
OLD_FILES+=usr/share/tmac/Xps.tmac
OLD_FILES+=usr/share/tmac/a4.tmac
OLD_FILES+=usr/share/tmac/an-old.tmac
OLD_FILES+=usr/share/tmac/an.tmac
OLD_FILES+=usr/share/tmac/andoc.tmac
OLD_FILES+=usr/share/tmac/composite.tmac
OLD_FILES+=usr/share/tmac/cp1047.tmac
OLD_FILES+=usr/share/tmac/devtag.tmac
OLD_FILES+=usr/share/tmac/doc.tmac
OLD_FILES+=usr/share/tmac/dvi.tmac
OLD_FILES+=usr/share/tmac/e.tmac
OLD_FILES+=usr/share/tmac/ec.tmac
OLD_FILES+=usr/share/tmac/eqnrc
OLD_FILES+=usr/share/tmac/europs.tmac
OLD_FILES+=usr/share/tmac/html-end.tmac
OLD_FILES+=usr/share/tmac/html.tmac
OLD_FILES+=usr/share/tmac/hyphen.ru
OLD_FILES+=usr/share/tmac/hyphen.us
OLD_FILES+=usr/share/tmac/hyphenex.us
OLD_FILES+=usr/share/tmac/koi8-r.tmac
OLD_FILES+=usr/share/tmac/latin1.tmac
OLD_FILES+=usr/share/tmac/latin2.tmac
OLD_FILES+=usr/share/tmac/latin9.tmac
OLD_FILES+=usr/share/tmac/lbp.tmac
OLD_FILES+=usr/share/tmac/lj4.tmac
OLD_FILES+=usr/share/tmac/m.tmac
OLD_FILES+=usr/share/tmac/man.local
OLD_FILES+=usr/share/tmac/man.tmac
OLD_FILES+=usr/share/tmac/mandoc.tmac
OLD_FILES+=usr/share/tmac/mdoc.local
OLD_FILES+=usr/share/tmac/mdoc.tmac
OLD_FILES+=usr/share/tmac/mdoc/doc-common
OLD_FILES+=usr/share/tmac/mdoc/doc-ditroff
OLD_FILES+=usr/share/tmac/mdoc/doc-nroff
OLD_FILES+=usr/share/tmac/mdoc/doc-syms
OLD_FILES+=usr/share/tmac/mdoc/fr.ISO8859-1
OLD_FILES+=usr/share/tmac/mdoc/ru.KOI8-R
OLD_DIRS+=usr/share/tmac/mdoc
OLD_FILES+=usr/share/tmac/me.tmac
OLD_FILES+=usr/share/tmac/mm/0.MT
OLD_FILES+=usr/share/tmac/mm/4.MT
OLD_FILES+=usr/share/tmac/mm/5.MT
OLD_FILES+=usr/share/tmac/mm/locale
OLD_FILES+=usr/share/tmac/mm/mm.tmac
OLD_FILES+=usr/share/tmac/mm/mmse.tmac
OLD_FILES+=usr/share/tmac/mm/ms.cov
OLD_FILES+=usr/share/tmac/mm/se_locale
OLD_FILES+=usr/share/tmac/mm/se_ms.cov
OLD_DIRS+=usr/share/tmac/mm
OLD_FILES+=usr/share/tmac/ms.tmac
OLD_FILES+=usr/share/tmac/mse.tmac
OLD_FILES+=usr/share/tmac/papersize.tmac
OLD_FILES+=usr/share/tmac/pic.tmac
OLD_FILES+=usr/share/tmac/ps.tmac
OLD_FILES+=usr/share/tmac/psatk.tmac
OLD_FILES+=usr/share/tmac/psold.tmac
OLD_FILES+=usr/share/tmac/pspic.tmac
OLD_FILES+=usr/share/tmac/s.tmac
OLD_FILES+=usr/share/tmac/safer.tmac
OLD_FILES+=usr/share/tmac/tmac.orig_me
OLD_FILES+=usr/share/tmac/tmac.vgrind
OLD_FILES+=usr/share/tmac/trace.tmac
OLD_FILES+=usr/share/tmac/troffrc
OLD_FILES+=usr/share/tmac/troffrc-end
OLD_FILES+=usr/share/tmac/tty-char.tmac
OLD_FILES+=usr/share/tmac/tty.tmac
OLD_FILES+=usr/share/tmac/unicode.tmac
OLD_FILES+=usr/share/tmac/www.tmac
OLD_DIRS+=usr/share/tmac
# 20170607: remove incorrect atf_check(1) manpage link
OLD_FILES+=usr/share/man/man1/atf_check.1.gz
# 20170601: remove stale manpage
OLD_FILES+=usr/share/man/man2/cap_rights_get.2.gz
# 20170601: old libifconfig and libifc
OLD_FILES+=usr/lib/libifc.a
OLD_FILES+=usr/lib/libifc_p.a
OLD_FILES+=usr/lib/libifconfig.a
OLD_FILES+=usr/lib/libifconfig_p.a
OLD_FILES+=usr/lib32/libifc.a
OLD_FILES+=usr/lib32/libifc_p.a
OLD_FILES+=usr/lib32/libifconfig.a
OLD_FILES+=usr/lib32/libifconfig_p.a
# 20170529: mount.conf(8) -> mount.conf(5)
OLD_FILES+=usr/share/man/man8/mount.conf.8.gz
# 20170525: remove misleading template
OLD_FILES+=usr/share/misc/man.template
# 20170525: disconnect the roff docs from the build
OLD_FILES+=usr/share/doc/papers/beyond43.ascii.gz
OLD_FILES+=usr/share/doc/papers/bio.ascii.gz
OLD_FILES+=usr/share/doc/papers/contents.ascii.gz
OLD_FILES+=usr/share/doc/papers/devfs.ascii.gz
OLD_FILES+=usr/share/doc/papers/diskperf.ascii.gz
OLD_FILES+=usr/share/doc/papers/fsinterface.ascii.gz
OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz
OLD_FILES+=usr/share/doc/papers/jail.ascii.gz
OLD_FILES+=usr/share/doc/papers/kernmalloc.ascii.gz
OLD_FILES+=usr/share/doc/papers/kerntune.ascii.gz
OLD_FILES+=usr/share/doc/papers/malloc.ascii.gz
OLD_FILES+=usr/share/doc/papers/newvm.ascii.gz
OLD_FILES+=usr/share/doc/papers/releng.ascii.gz
OLD_FILES+=usr/share/doc/papers/sysperf.ascii.gz
OLD_FILES+=usr/share/doc/papers/timecounter.ascii.gz
OLD_DIRS+=usr/share/doc/papers
OLD_FILES+=usr/share/doc/psd/01.cacm/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/01.cacm
OLD_FILES+=usr/share/doc/psd/02.implement/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/02.implement
OLD_FILES+=usr/share/doc/psd/03.iosys/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/03.iosys
OLD_FILES+=usr/share/doc/psd/04.uprog/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/04.uprog
OLD_FILES+=usr/share/doc/psd/05.sysman/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/05.sysman
OLD_FILES+=usr/share/doc/psd/06.Clang/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/06.Clang
OLD_FILES+=usr/share/doc/psd/12.make/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/12.make
OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz
OLD_DIRS+=usr/share/doc/psd/13.rcs
OLD_FILES+=usr/share/doc/psd/15.yacc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/15.yacc
OLD_FILES+=usr/share/doc/psd/16.lex/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/16.lex
OLD_FILES+=usr/share/doc/psd/17.m4/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/17.m4
OLD_FILES+=usr/share/doc/psd/18.gprof/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/18.gprof
OLD_FILES+=usr/share/doc/psd/20.ipctut/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/20.ipctut
OLD_FILES+=usr/share/doc/psd/21.ipc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/21.ipc
OLD_FILES+=usr/share/doc/psd/22.rpcgen/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/22.rpcgen
OLD_FILES+=usr/share/doc/psd/23.rpc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/23.rpc
OLD_FILES+=usr/share/doc/psd/24.xdr/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/24.xdr
OLD_FILES+=usr/share/doc/psd/25.xdrrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/25.xdrrfc
OLD_FILES+=usr/share/doc/psd/26.rpcrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/26.rpcrfc
OLD_FILES+=usr/share/doc/psd/27.nfsrfc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/psd/27.nfsrfc
OLD_FILES+=usr/share/doc/psd/Title.ascii.gz
OLD_FILES+=usr/share/doc/psd/contents.ascii.gz
OLD_DIRS+=usr/share/doc/psd
OLD_FILES+=usr/share/doc/smm/01.setup/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/01.setup
OLD_FILES+=usr/share/doc/smm/02.config/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/02.config
OLD_FILES+=usr/share/doc/smm/03.fsck/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/03.fsck
OLD_FILES+=usr/share/doc/smm/04.quotas/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/04.quotas
OLD_FILES+=usr/share/doc/smm/05.fastfs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/05.fastfs
OLD_FILES+=usr/share/doc/smm/06.nfs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/06.nfs
OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/07.lpd
OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/08.sendmailop
OLD_FILES+=usr/share/doc/smm/11.timedop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/11.timedop
OLD_FILES+=usr/share/doc/smm/12.timed/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/12.timed
OLD_FILES+=usr/share/doc/smm/18.net/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/18.net
OLD_FILES+=usr/share/doc/smm/Title.ascii.gz
OLD_FILES+=usr/share/doc/smm/contents.ascii.gz
OLD_DIRS+=usr/share/doc/smm
OLD_FILES+=usr/share/doc/usd/04.csh/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/04.csh
OLD_FILES+=usr/share/doc/usd/05.dc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/05.dc
OLD_FILES+=usr/share/doc/usd/06.bc/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/06.bc
OLD_FILES+=usr/share/doc/usd/07.mail/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/07.mail
OLD_FILES+=usr/share/doc/usd/10.exref/paper.ascii.gz
OLD_FILES+=usr/share/doc/usd/10.exref/summary.ascii.gz
OLD_DIRS+=usr/share/doc/usd/10.exref
OLD_FILES+=usr/share/doc/usd/11.edit/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/11.edit
OLD_FILES+=usr/share/doc/usd/12.vi/paper.ascii.gz
OLD_FILES+=usr/share/doc/usd/12.vi/summary.ascii.gz
OLD_FILES+=usr/share/doc/usd/12.vi/viapwh.ascii.gz
OLD_DIRS+=usr/share/doc/usd/12.vi
OLD_FILES+=usr/share/doc/usd/13.viref/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/13.viref
OLD_FILES+=usr/share/doc/usd/18.msdiffs/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/18.msdiffs
OLD_FILES+=usr/share/doc/usd/19.memacros/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/19.memacros
OLD_FILES+=usr/share/doc/usd/20.meref/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/20.meref
OLD_FILES+=usr/share/doc/usd/21.troff/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/21.troff
OLD_FILES+=usr/share/doc/usd/22.trofftut/paper.ascii.gz
OLD_DIRS+=usr/share/doc/usd/22.trofftut
OLD_FILES+=usr/share/doc/usd/Title.ascii.gz
OLD_FILES+=usr/share/doc/usd/contents.ascii.gz
OLD_DIRS+=usr/share/doc/usd
# 20170523: 64-bit inode support, library version bumps
OLD_LIBS+=lib/libzfs.so.2
OLD_LIBS+=usr/lib/libarchive.so.6
OLD_LIBS+=usr/lib/libmilter.so.5
OLD_LIBS+=usr/lib32/libzfs.so.2
OLD_LIBS+=usr/lib32/libarchive.so.6
OLD_LIBS+=usr/lib32/libmilter.so.5
# 20170427: NATM configuration support removed
OLD_FILES+=etc/rc.d/atm1
OLD_FILES+=etc/rc.d/atm2
OLD_FILES+=etc/rc.d/atm3
# 20170424: NATM support removed
OLD_FILES+=rescue/atmconfig
OLD_FILES+=sbin/atmconfig
OLD_FILES+=usr/include/bsnmp/snmp_atm.h
OLD_FILES+=usr/include/dev/utopia/idtphy.h
OLD_FILES+=usr/include/dev/utopia/suni.h
OLD_FILES+=usr/include/dev/utopia/utopia.h
OLD_FILES+=usr/include/dev/utopia/utopia_priv.h
OLD_DIRS+=usr/include/dev/utopia
OLD_FILES+=usr/include/net/if_atm.h
OLD_FILES+=usr/include/netgraph/atm/ng_atm.h
OLD_FILES+=usr/include/netinet/if_atm.h
OLD_FILES+=usr/include/netnatm/natm.h
OLD_FILES+=usr/lib/debug/sbin/atmconfig.debug
OLD_FILES+=usr/lib/debug/usr/lib/snmp_atm.so.6.debug
OLD_FILES+=usr/lib/snmp_atm.so
OLD_FILES+=usr/lib/snmp_atm.so.6
OLD_FILES+=usr/share/doc/atm/atmconfig.help
OLD_FILES+=usr/share/doc/atm/atmconfig_device.help
OLD_DIRS+=usr/share/doc/atm
OLD_FILES+=usr/share/man/man3/snmp_atm.3.gz
OLD_FILES+=usr/share/man/man4/en.4.gz
OLD_FILES+=usr/share/man/man4/fatm.4.gz
OLD_FILES+=usr/share/man/man4/hatm.4.gz
OLD_FILES+=usr/share/man/man4/if_en.4.gz
OLD_FILES+=usr/share/man/man4/if_fatm.4.gz
OLD_FILES+=usr/share/man/man4/if_hatm.4.gz
OLD_FILES+=usr/share/man/man4/if_patm.4.gz
OLD_FILES+=usr/share/man/man4/natm.4.gz
OLD_FILES+=usr/share/man/man4/natmip.4.gz
OLD_FILES+=usr/share/man/man4/ng_atm.4.gz
OLD_FILES+=usr/share/man/man4/patm.4.gz
OLD_FILES+=usr/share/man/man4/utopia.4.gz
OLD_FILES+=usr/share/man/man8/atmconfig.8.gz
OLD_FILES+=usr/share/man/man9/utopia.9.gz
OLD_FILES+=usr/share/snmp/defs/atm_freebsd.def
OLD_FILES+=usr/share/snmp/defs/atm_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt
# 20170420: remove GNU diff
OLD_FILES+=usr/share/man/man7/diff.7.gz
# 20170322: rename <x> to <x>_test to match the FreeBSD test suite name scheme
OLD_FILES+=usr/tests/usr.bin/col/col
OLD_FILES+=usr/tests/usr.bin/diff/diff
OLD_FILES+=usr/tests/usr.bin/ident/ident
OLD_FILES+=usr/tests/usr.bin/mkimg/mkimg
OLD_FILES+=usr/tests/usr.bin/sdiff/sdiff
OLD_FILES+=usr/tests/usr.bin/soelim/soelim
OLD_FILES+=usr/tests/usr.sbin/pw/pw_config
OLD_FILES+=usr/tests/usr.sbin/pw/pw_etcdir
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupadd
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupdel
OLD_FILES+=usr/tests/usr.sbin/pw/pw_groupmod
OLD_FILES+=usr/tests/usr.sbin/pw/pw_lock
OLD_FILES+=usr/tests/usr.sbin/pw/pw_useradd
OLD_FILES+=usr/tests/usr.sbin/pw/pw_userdel
OLD_FILES+=usr/tests/usr.sbin/pw/pw_usermod
OLD_FILES+=usr/tests/usr.sbin/pw/pw_usernext
# 20170322: garbage collect old references to igb(4)
OLD_FILES+=usr/share/man/man4/if_igb.4.gz
OLD_FILES+=usr/share/man/man4/igb.4.gz
# 20170319: io_test removed; it requires the zh_TW.Big5 locale, which is no
# longer supported.
OLD_FILES+=usr/tests/lib/libc/locale/io_test
# 20170319: remove nls for unsupported Big5* locales
OLD_DIRS+=usr/share/nls/zh_HK.Big5HKSCS
OLD_DIRS+=usr/share/nls/zh_TW.Big5
# 20170313: move .../sys/geom/eli/... to .../sys/geom/class/eli/...
OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/pbkdf2
OLD_FILES+=usr/tests/sys/geom/eli/pbkdf2/Kyuafile
OLD_FILES+=usr/tests/sys/geom/eli/Kyuafile
OLD_DIRS+=usr/tests/sys/geom/eli/pbkdf2
OLD_DIRS+=usr/tests/sys/geom/eli
# 20170313: sbin/ipftest and ipresend temporarily disconnected.
OLD_FILES+=sbin/ipftest
OLD_FILES+=sbin/ipresend
# 20170311: Remove WITHOUT_MANDOCDB option
OLD_FILES+=usr/share/man/man1/makewhatis.1.gz
# 20170308: rename some tests
OLD_FILES+=usr/tests/bin/pwait/pwait
OLD_FILES+=usr/tests/usr.bin/timeout/timeout
# 20170307: remove pcap-int.h
OLD_FILES+=usr/include/pcap-int.h
# 20170302: new libc++ import which bumps version from 3.9.1 to 4.0.0.
OLD_FILES+=usr/include/c++/v1/__undef___deallocate
OLD_FILES+=usr/include/c++/v1/tr1/__undef___deallocate
# 20170302: new clang import which bumps version from 3.9.1 to 4.0.0.
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.1/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.9.1/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.9.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.9.1/include/msa.h
OLD_FILES+=usr/lib/clang/3.9.1/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/opencl-c.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.1/include
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.1/lib
OLD_DIRS+=usr/lib/clang/3.9.1
# 20170226: SVR4 compatibility removed
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/streams.4.gz
OLD_FILES+=usr/share/man/man4/svr4.4.gz
.endif
# 20170219: OpenPAM RADULA upgrade removed the libpam tests
OLD_FILES+=usr/tests/lib/libpam/Kyuafile
OLD_FILES+=usr/tests/lib/libpam/t_openpam_ctype
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readlinev
OLD_FILES+=usr/tests/lib/libpam/t_openpam_readword
OLD_DIRS+=usr/tests/lib/libpam
# 20170206: remove bdes(1)
OLD_FILES+=usr/bin/bdes
OLD_FILES+=usr/lib/debug/usr/bin/bdes.debug
OLD_FILES+=usr/share/man/man1/bdes.1.gz
# 20170206: merged projects/ipsec
OLD_FILES+=usr/include/netinet/ip_ipsec.h
OLD_FILES+=usr/include/netinet6/ip6_ipsec.h
# 20170128: remove pc98 support
OLD_FILES+=usr/include/dev/ic/i8251.h
OLD_FILES+=usr/include/dev/ic/i8255.h
OLD_FILES+=usr/include/dev/ic/rsa.h
OLD_FILES+=usr/include/dev/ic/wd33c93reg.h
OLD_FILES+=usr/include/sys/disk/pc98.h
OLD_FILES+=usr/include/sys/diskpc98.h
OLD_FILES+=usr/share/man/man4/i386/ct.4.gz
OLD_FILES+=usr/share/man/man4/i386/snc.4.gz
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.pc98.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.iso.kbd
OLD_FILES+=usr/share/vt/keymaps/jp.pc98.kbd
# 20170110: Four files from ggate tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/gate/1_test
OLD_FILES+=usr/tests/sys/geom/class/gate/2_test
OLD_FILES+=usr/tests/sys/geom/class/gate/3_test
OLD_FILES+=usr/tests/sys/geom/class/gate/conf.sh
# 20170103: libbsnmptools.so made into an INTERNALLIB
OLD_FILES+=usr/lib/libbsnmptools.a
OLD_FILES+=usr/lib/libbsnmptools_p.a
OLD_FILES+=usr/lib/libbsnmptools.so
OLD_LIBS+=usr/lib/libbsnmptools.so.0
# 20170102: sysdecode_getfsstat_flags() renamed to sysdecode_getfsstat_mode()
OLD_FILES+=usr/share/man/man3/sysdecode_getfsstat_flags.3.gz
# 20161230: libarchive ACL pax test renamed to test_acl_pax_posix1e.tar.uu
OLD_FILES+=usr/tests/lib/libarchive/test_acl_pax.tar.uu
# 20161229: Three files from gnop tests consolidated into one
OLD_FILES+=usr/tests/sys/geom/class/nop/1_test
OLD_FILES+=usr/tests/sys/geom/class/nop/2_test
OLD_FILES+=usr/tests/sys/geom/class/nop/conf.sh
# 20161217: new clang import which bumps version from 3.9.0 to 3.9.1.
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/esan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.9.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.9.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_cmath.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_intrinsics.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_math_forward_declares.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.9.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.9.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.9.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.9.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512ifmavlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512pfintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vbmivlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlcdintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/clflushoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.9.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.9.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.9.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.9.0/include/mwaitxintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/opencl-c.h
OLD_FILES+=usr/lib/clang/3.9.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.9.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.9.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.9.0/include
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.9.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.9.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.9.0/lib
OLD_DIRS+=usr/lib/clang/3.9.0
# 20161205: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.3
OLD_LIBS+=usr/lib32/libproc.so.3
# 20161127: Remove vm_page_cache(9)
OLD_FILES+=usr/share/man/man9/vm_page_cache.9.gz
# 20161124: new clang import which bumps version from 3.8.0 to 3.9.0.
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.8.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.8.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.8.0/include/__clang_cuda_runtime_wrapper.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.8.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.8.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.8.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.8.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.8.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.8.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.8.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.8.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/pkuintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.8.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsavecintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsaveoptintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xsavesintrin.h
OLD_FILES+=usr/lib/clang/3.8.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.8.0/include
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-i386.so
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan-x86_64.so
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.8.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.8.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.8.0/lib
OLD_DIRS+=usr/lib/clang/3.8.0
# 20161121: Hyper-V manuals only apply to amd64 and i386.
.if ${TARGET_ARCH} != "amd64" && ${TARGET_ARCH} != "i386"
OLD_FILES+=usr/share/man/man4/hv_kvp.4.gz
OLD_FILES+=usr/share/man/man4/hv_netvsc.4.gz
OLD_FILES+=usr/share/man/man4/hv_storvsc.4.gz
OLD_FILES+=usr/share/man/man4/hv_utils.4.gz
OLD_FILES+=usr/share/man/man4/hv_vmbus.4.gz
OLD_FILES+=usr/share/man/man4/hv_vss.4.gz
.endif
# 20161118: Remove hv_ata_pci_disengage(4)
OLD_FILES+=usr/share/man/man4/hv_ata_pci_disengage.4.gz
# 20161017: urtwn(4) was merged into rtwn(4)
OLD_FILES+=usr/share/man/man4/urtwn.4.gz
OLD_FILES+=usr/share/man/man4/urtwnfw.4.gz
# 20161015: Remove GNU rcs
OLD_FILES+=usr/bin/ci
OLD_FILES+=usr/bin/co
OLD_FILES+=usr/bin/merge
OLD_FILES+=usr/bin/rcs
OLD_FILES+=usr/bin/rcsclean
OLD_FILES+=usr/bin/rcsdiff
OLD_FILES+=usr/bin/rcsfreeze
OLD_FILES+=usr/bin/rcsmerge
OLD_FILES+=usr/bin/rlog
OLD_FILES+=usr/share/doc/psd/13.rcs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/13.rcs/rcs_func.ascii.gz
OLD_DIRS+=usr/share/doc/psd/13.rcs
OLD_FILES+=usr/share/man/man1/ci.1.gz
OLD_FILES+=usr/share/man/man1/co.1.gz
OLD_FILES+=usr/share/man/man1/merge.1.gz
OLD_FILES+=usr/share/man/man1/rcs.1.gz
OLD_FILES+=usr/share/man/man1/rcsclean.1.gz
OLD_FILES+=usr/share/man/man1/rcsdiff.1.gz
OLD_FILES+=usr/share/man/man1/rcsfreeze.1.gz
OLD_FILES+=usr/share/man/man1/rcsintro.1.gz
OLD_FILES+=usr/share/man/man1/rcsmerge.1.gz
OLD_FILES+=usr/share/man/man1/rlog.1.gz
OLD_FILES+=usr/share/man/man5/rcsfile.5.gz
# 20161010: remove link to removed m_getclr(9) macro
OLD_FILES+=usr/share/man/man9/m_getclr.9.gz
# 20161003: MK_ELFCOPY_AS_OBJCOPY option retired
OLD_FILES+=usr/bin/elfcopy
OLD_FILES+=usr/share/man/man1/elfcopy.1.gz
# 20160906: libkqueue tests moved to /usr/tests/sys/kqueue/libkqueue
OLD_FILES+=usr/tests/sys/kqueue/kqtest
OLD_FILES+=usr/tests/sys/kqueue/kqueue_test
# 20160903: idle page zeroing support removed
OLD_FILES+=usr/share/man/man9/pmap_zero_idle.9.gz
# 20160901: Remove digi(4)
OLD_FILES+=usr/share/man/man4/digi.4.gz
# 20160819: Remove ie(4)
OLD_FILES+=usr/share/man/man4/i386/ie.4.gz
# 20160819: Remove spic(4)
OLD_FILES+=usr/share/man/man4/spic.4.gz
# 20160819: Remove wl(4) and wlconfig(8)
OLD_FILES+=usr/share/man/man4/i386/wl.4.gz
OLD_FILES+=usr/sbin/wlconfig
OLD_FILES+=usr/share/man/man8/i386/wlconfig.8.gz
# 20160819: Remove si(4) and sicontrol(8)
OLD_FILES+=usr/share/man/man4/si.4.gz
OLD_FILES+=usr/sbin/sicontrol
OLD_FILES+=usr/share/man/man8/sicontrol.8.gz
# 20160819: Remove scd(4)
OLD_FILES+=usr/share/man/man4/scd.4.gz
# 20160815: Remove mcd(4)
OLD_FILES+=usr/share/man/man4/mcd.4.gz
# 20160805: lockmgr_waiters(9) removed
OLD_FILES+=usr/share/man/man9/lockmgr_waiters.9.gz
# 20160703: POSIXify locales with variants
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_TW.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_TW.UTF-8
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_TW.Big5/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_TW.Big5
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_HK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_HK.UTF-8
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.eucCN/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.eucCN
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GBK/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GBK
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB2312/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB2312
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hans_CN.GB18030/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hans_CN.GB18030
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Latn_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Latn_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Latn_RS.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Latn_RS.ISO8859-2
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_Cyrl_RS.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/sr_Cyrl_RS.ISO8859-5
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/mn_Cyrl_MN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/mn_Cyrl_MN.UTF-8
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_Cyrl_KZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/kk_Cyrl_KZ.UTF-8
# 20160608: removed pam_verbose_error
OLD_LIBS+=usr/lib/libpam.so.5
OLD_LIBS+=usr/lib/pam_chroot.so.5
OLD_LIBS+=usr/lib/pam_deny.so.5
OLD_LIBS+=usr/lib/pam_echo.so.5
OLD_LIBS+=usr/lib/pam_exec.so.5
OLD_LIBS+=usr/lib/pam_ftpusers.so.5
OLD_LIBS+=usr/lib/pam_group.so.5
OLD_LIBS+=usr/lib/pam_guest.so.5
OLD_LIBS+=usr/lib/pam_krb5.so.5
OLD_LIBS+=usr/lib/pam_ksu.so.5
OLD_LIBS+=usr/lib/pam_lastlog.so.5
OLD_LIBS+=usr/lib/pam_login_access.so.5
OLD_LIBS+=usr/lib/pam_nologin.so.5
OLD_LIBS+=usr/lib/pam_opie.so.5
OLD_LIBS+=usr/lib/pam_opieaccess.so.5
OLD_LIBS+=usr/lib/pam_passwdqc.so.5
OLD_LIBS+=usr/lib/pam_permit.so.5
OLD_LIBS+=usr/lib/pam_radius.so.5
OLD_LIBS+=usr/lib/pam_rhosts.so.5
OLD_LIBS+=usr/lib/pam_rootok.so.5
OLD_LIBS+=usr/lib/pam_securetty.so.5
OLD_LIBS+=usr/lib/pam_self.so.5
OLD_LIBS+=usr/lib/pam_ssh.so.5
OLD_LIBS+=usr/lib/pam_tacplus.so.5
OLD_LIBS+=usr/lib/pam_unix.so.5
OLD_LIBS+=usr/lib32/libpam.so.5
OLD_LIBS+=usr/lib32/pam_chroot.so.5
OLD_LIBS+=usr/lib32/pam_deny.so.5
OLD_LIBS+=usr/lib32/pam_echo.so.5
OLD_LIBS+=usr/lib32/pam_exec.so.5
OLD_LIBS+=usr/lib32/pam_ftpusers.so.5
OLD_LIBS+=usr/lib32/pam_group.so.5
OLD_LIBS+=usr/lib32/pam_guest.so.5
OLD_LIBS+=usr/lib32/pam_krb5.so.5
OLD_LIBS+=usr/lib32/pam_ksu.so.5
OLD_LIBS+=usr/lib32/pam_lastlog.so.5
OLD_LIBS+=usr/lib32/pam_login_access.so.5
OLD_LIBS+=usr/lib32/pam_nologin.so.5
OLD_LIBS+=usr/lib32/pam_opie.so.5
OLD_LIBS+=usr/lib32/pam_opieaccess.so.5
OLD_LIBS+=usr/lib32/pam_passwdqc.so.5
OLD_LIBS+=usr/lib32/pam_permit.so.5
OLD_LIBS+=usr/lib32/pam_radius.so.5
OLD_LIBS+=usr/lib32/pam_rhosts.so.5
OLD_LIBS+=usr/lib32/pam_rootok.so.5
OLD_LIBS+=usr/lib32/pam_securetty.so.5
OLD_LIBS+=usr/lib32/pam_self.so.5
OLD_LIBS+=usr/lib32/pam_ssh.so.5
OLD_LIBS+=usr/lib32/pam_tacplus.so.5
OLD_LIBS+=usr/lib32/pam_unix.so.5
# 20160523: remove extraneous ALTQ files
OLD_FILES+=usr/include/altq/altq_codel.h
OLD_FILES+=usr/include/altq/altq_fairq.h
# 20160519: remove DTrace Toolkit from base
OLD_FILES+=usr/sbin/dtruss
OLD_FILES+=usr/share/dtrace/toolkit/execsnoop
OLD_FILES+=usr/share/dtrace/toolkit/hotkernel
OLD_FILES+=usr/share/dtrace/toolkit/hotuser
OLD_FILES+=usr/share/dtrace/toolkit/opensnoop
OLD_FILES+=usr/share/dtrace/toolkit/procsystime
OLD_DIRS+=usr/share/dtrace/toolkit
OLD_FILES+=usr/share/man/man1/dtruss.1.gz
# 20160519: stale MLINK removed
OLD_FILES+=usr/share/man/man9/rman_await_resource.9.gz
# 20160517: ReiserFS removed
OLD_FILES+=usr/share/man/man5/reiserfs.5.gz
# 20160504: tests rework
OLD_FILES+=usr/tests/lib/libc/regex/data/README
# 20160430: kvm_getfiles(3) removed from kvm(3)
OLD_LIBS+=lib/libkvm.so.6
OLD_LIBS+=usr/lib32/libkvm.so.6
OLD_FILES+=usr/share/man/man3/kvm_getfiles.3.gz
# 20160423: remove mroute6d
OLD_FILES+=etc/rc.d/mroute6d
# 20160419: rename units.lib -> definitions.units
OLD_FILES+=usr/share/misc/units.lib
# 20160419: remove Big5HKSCS locales
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_HK.Big5HKSCS/LC_TIME
OLD_DIRS+=usr/share/locale/zh_HK.Big5HKSCS
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_Hant_HK.Big5HKSCS/LC_TIME
OLD_DIRS+=usr/share/locale/zh_Hant_HK.Big5HKSCS
# 20160317: rman_res_t size bump to uintmax_t
OLD_LIBS+=usr/lib/libdevinfo.so.5
OLD_LIBS+=usr/lib32/libdevinfo.so.5
# 20160305: new clang import which bumps version from 3.7.1 to 3.8.0.
OLD_FILES+=usr/bin/macho-dump
OLD_FILES+=usr/bin/tblgen
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.7.1/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.7.1/include/sanitizer
OLD_FILES+=usr/lib/clang/3.7.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.7.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.7.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.7.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.7.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.7.1/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.7.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.7.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.7.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.7.1/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.7.1/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.7.1/include
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.7.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.7.1/lib
OLD_DIRS+=usr/lib/clang/3.7.1
# 20160301: Remove taskqueue_enqueue_fast
OLD_FILES+=usr/share/man/man9/taskqueue_enqueue_fast.9.gz
# 20160225: Remove casperd and libcapsicum.
OLD_FILES+=sbin/casperd
OLD_FILES+=etc/rc.d/casperd
OLD_FILES+=usr/share/man/man8/casperd.8.gz
OLD_FILES+=usr/include/libcapsicum.h
OLD_FILES+=usr/include/libcapsicum_service.h
OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz
OLD_FILES+=usr/include/libcapsicum_dns.h
OLD_FILES+=usr/include/libcapsicum_grp.h
OLD_FILES+=usr/include/libcapsicum_impl.h
OLD_FILES+=usr/include/libcapsicum_pwd.h
OLD_FILES+=usr/include/libcapsicum_random.h
OLD_FILES+=usr/include/libcapsicum_sysctl.h
OLD_FILES+=libexec/casper/dns
OLD_FILES+=libexec/casper/grp
OLD_FILES+=libexec/casper/pwd
OLD_FILES+=libexec/casper/random
OLD_FILES+=libexec/casper/sysctl
OLD_FILES+=libexec/casper/.debug/random.debug
OLD_FILES+=libexec/casper/.debug/dns.debug
OLD_FILES+=libexec/casper/.debug/sysctl.debug
OLD_FILES+=libexec/casper/.debug/pwd.debug
OLD_FILES+=libexec/casper/.debug/grp.debug
OLD_DIRS+=libexec/casper/.debug
OLD_DIRS+=libexec/casper
OLD_FILES+=usr/lib/libcapsicum.a
OLD_FILES+=usr/lib/libcapsicum.so
OLD_LIBS+=lib/libcapsicum.so.0
OLD_FILES+=usr/lib/libcapsicum_p.a
OLD_FILES+=usr/lib32/libcapsicum.a
OLD_FILES+=usr/lib32/libcapsicum.so
OLD_LIBS+=usr/lib32/libcapsicum.so.0
OLD_FILES+=usr/lib32/libcapsicum_p.a
# 20160223: functionality from mkulzma(1) merged into mkuzip(1)
OLD_FILES+=usr/bin/mkulzma
OLD_FILES+=usr/share/man/man4/geom_uncompress.4.gz
OLD_FILES+=usr/share/man/man8/mkulzma.8.gz
# 20160211: Remove obsolete unbound-control-setup
OLD_FILES+=usr/sbin/unbound-control-setup
# 20160121: cc.h moved
OLD_FILES+=usr/include/netinet/cc.h
# 20160116: Update mandoc to cvs snapshot 20160116
OLD_FILES+=usr/share/mdocml/example.style.css
OLD_FILES+=usr/share/mdocml/style.css
OLD_DIRS+=usr/share/mdocml
# 20160114: SA-16:06.snmpd
OLD_FILES+=usr/share/examples/etc/snmpd.config
# 20160107: GNU ld installed as ld.bfd and linked as ld
OLD_FILES+=usr/lib/debug/usr/bin/ld.debug
# 20151225: new clang import which bumps version from 3.7.0 to 3.7.1.
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/allocator_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/asan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/common_interface_defs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/coverage_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/dfsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/linux_syscall_hooks.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/lsan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/msan_interface.h
OLD_FILES+=usr/lib/clang/3.7.0/include/sanitizer/tsan_interface_atomic.h
OLD_DIRS+=usr/lib/clang/3.7.0/include/sanitizer
OLD_FILES+=usr/lib/clang/3.7.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.7.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.7.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.7.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512cdintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512dqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vldqintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.7.0/include/cuda_builtin_vars.h
OLD_FILES+=usr/lib/clang/3.7.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/fxsrintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/htmxlintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.7.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.7.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/s390intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vadefs.h
OLD_FILES+=usr/lib/clang/3.7.0/include/vecintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xopintrin.h
OLD_FILES+=usr/lib/clang/3.7.0/include/xtestintrin.h
OLD_DIRS+=usr/lib/clang/3.7.0/include
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.safestack-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.7.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.7.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.7.0/lib
OLD_DIRS+=usr/lib/clang/3.7.0
# 20151130: libelf moved from /usr/lib to /lib (libkvm dependency in r291406)
OLD_LIBS+=usr/lib/libelf.so.2
# 20151115: Fix bad upgrade scheme
OLD_FILES+=usr/share/locale/zh_CN.GB18030/zh_Hans_CN.GB18030
OLD_FILES+=usr/share/locale/zh_CN.GB2312/zh_Hans_CN.GB2312
OLD_FILES+=usr/share/locale/zh_CN.GBK/zh_Hans_CN.GBK
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/zh_Hans_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_CN.eucCN/zh_Hans_CN.eucCN
OLD_FILES+=usr/share/locale/zh_TW.Big5/zh_Hant_TW.Big5
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/zh_Hant_TW.UTF-8
# 20151107: String collation improvements
OLD_FILES+=usr/share/locale/UTF-8/LC_CTYPE
OLD_DIRS+=usr/share/locale/UTF-8
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.PT154/LC_TIME
OLD_DIRS+=usr/share/locale/kk_KZ.PT154
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-1
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-13/LC_CTYPE
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-13
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-15
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-2
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.ISO8859-4/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.ISO8859-4
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/la_LN.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/la_LN.US-ASCII
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_TIME
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-4/LC_NUMERIC
OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-4
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-1
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.ISO8859-15
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/no_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/no_NO.UTF-8
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-2/LC_MONETARY
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-2
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.ISO8859-5/LC_MESSAGES
OLD_DIRS+=usr/share/locale/sr_YU.ISO8859-5
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_TIME
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_YU.UTF-8/LC_MESSAGES
OLD_DIRS+=usr/share/locale/sr_YU.UTF-8
# 20151101: added missing _test suffix on multiple tests in lib/libc
OLD_FILES+=usr/tests/lib/libc/c063/faccessat
OLD_FILES+=usr/tests/lib/libc/c063/fchmodat
OLD_FILES+=usr/tests/lib/libc/c063/fchownat
OLD_FILES+=usr/tests/lib/libc/c063/fexecve
OLD_FILES+=usr/tests/lib/libc/c063/fstatat
OLD_FILES+=usr/tests/lib/libc/c063/linkat
OLD_FILES+=usr/tests/lib/libc/c063/mkdirat
OLD_FILES+=usr/tests/lib/libc/c063/mkfifoat
OLD_FILES+=usr/tests/lib/libc/c063/mknodat
OLD_FILES+=usr/tests/lib/libc/c063/openat
OLD_FILES+=usr/tests/lib/libc/c063/readlinkat
OLD_FILES+=usr/tests/lib/libc/c063/renameat
OLD_FILES+=usr/tests/lib/libc/c063/symlinkat
OLD_FILES+=usr/tests/lib/libc/c063/unlinkat
OLD_FILES+=usr/tests/lib/libc/c063/utimensat
OLD_FILES+=usr/tests/lib/libc/string/memchr
OLD_FILES+=usr/tests/lib/libc/string/memcpy
OLD_FILES+=usr/tests/lib/libc/string/memmem
OLD_FILES+=usr/tests/lib/libc/string/memset
OLD_FILES+=usr/tests/lib/libc/string/strcat
OLD_FILES+=usr/tests/lib/libc/string/strchr
OLD_FILES+=usr/tests/lib/libc/string/strcmp
OLD_FILES+=usr/tests/lib/libc/string/strcpy
OLD_FILES+=usr/tests/lib/libc/string/strcspn
OLD_FILES+=usr/tests/lib/libc/string/strerror
OLD_FILES+=usr/tests/lib/libc/string/strlen
OLD_FILES+=usr/tests/lib/libc/string/strpbrk
OLD_FILES+=usr/tests/lib/libc/string/strrchr
OLD_FILES+=usr/tests/lib/libc/string/strspn
OLD_FILES+=usr/tests/lib/libc/string/swab
# 20151101: 430.status-rwho was renamed to 430.status-uptime
OLD_FILES+=etc/periodic/daily/430.status-rwho
# 20151030: OpenSSL 1.0.2d import
OLD_FILES+=usr/share/openssl/man/man3/CMS_set1_signer_certs.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_ctrl_str.3.gz
OLD_FILES+=usr/share/openssl/man/man3/d2i_509_CRL_fp.3.gz
OLD_LIBS+=lib/libcrypto.so.7
OLD_LIBS+=usr/lib/libssl.so.7
OLD_LIBS+=usr/lib32/libcrypto.so.7
OLD_LIBS+=usr/lib32/libssl.so.7
# 20151029: LinuxKPI moved to sys/compat/linuxkpi
OLD_FILES+=usr/include/dev/usb/usb_compat_linux.h
# 20151015: test symbols moved to /usr/lib/debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/atf_c++_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/tests_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c++/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/application_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/exceptions_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/atf_c_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/build_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/check_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/config_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/error_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/macros_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tc_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/tp_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/.debug/utils_test.debug
OLD_DIRS+=usr/tests/lib/atf/libatf-c/detail/.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/dynstr_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/env_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/fs_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/list_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/map_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_helpers.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/process_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/sanity_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/text_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/user_test.debug
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/.debug/version_helper.debug
OLD_DIRS+=usr/tests/lib/atf/test-programs/.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/c_helpers.debug
OLD_FILES+=usr/tests/lib/atf/test-programs/.debug/cpp_helpers.debug
OLD_DIRS+=usr/tests/lib/libc/c063/.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/faccessat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchmodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fchownat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fexecve.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/fstatat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/linkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkdirat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mkfifoat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/mknodat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/openat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/readlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/renameat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/symlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/unlinkat.debug
OLD_FILES+=usr/tests/lib/libc/c063/.debug/utimensat.debug
OLD_DIRS+=usr/tests/lib/libc/db/.debug
OLD_FILES+=usr/tests/lib/libc/db/.debug/h_db.debug
OLD_DIRS+=usr/tests/lib/libc/gen/.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/alarm_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/arc4random_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/assert_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/basedirname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/dir_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/floatunditf_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fnmatch_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify2_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpclassify_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetmask_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/fpsetround_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ftok_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getcwd_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/getgrent_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/glob_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/isnan_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/nice_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/pause_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/raise_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/realpath_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/setdomainname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sethostname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/syslog_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/time_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/ttyname_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/.debug/vis_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/execve/.debug
OLD_FILES+=usr/tests/lib/libc/gen/execve/.debug/execve_test.debug
OLD_DIRS+=usr/tests/lib/libc/gen/posix_spawn/.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/fileactions_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_fileactions.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawn.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/h_spawnattr.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawn_test.debug
OLD_FILES+=usr/tests/lib/libc/gen/posix_spawn/.debug/spawnattr_test.debug
OLD_DIRS+=usr/tests/lib/libc/hash/.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/h_hash.debug
OLD_FILES+=usr/tests/lib/libc/hash/.debug/sha2_test.debug
OLD_DIRS+=usr/tests/lib/libc/inet/.debug
OLD_FILES+=usr/tests/lib/libc/inet/.debug/inet_network_test.debug
OLD_DIRS+=usr/tests/lib/libc/locale/.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/io_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbrtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbsnrtowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbstowcs_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/mbtowc_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcscspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcspbrk_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcsspn_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wcstod_test.debug
OLD_FILES+=usr/tests/lib/libc/locale/.debug/wctomb_test.debug
OLD_DIRS+=usr/tests/lib/libc/net/.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/ether_aton_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/getprotoent_test.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_dns_server.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_nsd_recurse.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_protoent.debug
OLD_FILES+=usr/tests/lib/libc/net/.debug/h_servent.debug
OLD_DIRS+=usr/tests/lib/libc/regex/.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/exhaust_test.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/h_regex.debug
OLD_FILES+=usr/tests/lib/libc/regex/.debug/regex_att_test.debug
OLD_DIRS+=usr/tests/lib/libc/ssp/.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_fgets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_getcwd.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_gets.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memmove.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_memset.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_raw.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_read.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_readlink.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_snprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_sprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_stpncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strcpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncat.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_strncpy.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsnprintf.debug
OLD_FILES+=usr/tests/lib/libc/ssp/.debug/h_vsprintf.debug
OLD_DIRS+=usr/tests/lib/libc/stdio/.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/clearerr_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fflush_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen2_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fmemopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fopen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/fputc_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/mktemp_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/popen_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/printf_test.debug
OLD_FILES+=usr/tests/lib/libc/stdio/.debug/scanf_test.debug
OLD_DIRS+=usr/tests/lib/libc/stdlib/.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/abs_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/atoi_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/div_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/exit_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/getenv_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/h_getopt_long.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/hsearch_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/posix_memalign_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/random_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtod_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/strtol_test.debug
OLD_FILES+=usr/tests/lib/libc/stdlib/.debug/system_test.debug
OLD_DIRS+=usr/tests/lib/libc/string/.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memmem.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/memset.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcat.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcmp.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcpy.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strcspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strerror.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strlen.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strpbrk.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strrchr.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/strspn.debug
OLD_FILES+=usr/tests/lib/libc/string/.debug/swab.debug
OLD_DIRS+=usr/tests/lib/libc/sys/.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/access_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/chroot_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/clock_gettime_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/connect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/dup_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/fsync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getcontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getgroups_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getitimer_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getlogin_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getpid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getrusage_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/getsid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/gettimeofday_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/issetugid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kevent_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/link_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/listen_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mincore_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkdir_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mkfifo_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mknod_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mlock_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mmap_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/mprotect_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgctl_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgget_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgrcv_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msgsnd_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/msync_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/nanosleep_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe2_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/pipe_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/poll_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/revoke_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/select_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setrlimit_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/setuid_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigaction_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigqueue_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/sigtimedwait_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/socketpair_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/stat_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/timer_create_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/truncate_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/ucontext_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/umask_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/unlink_test.debug
OLD_FILES+=usr/tests/lib/libc/sys/.debug/write_test.debug
OLD_DIRS+=usr/tests/lib/libc/termios/.debug
OLD_FILES+=usr/tests/lib/libc/termios/.debug/tcsetpgrp_test.debug
OLD_DIRS+=usr/tests/lib/libc/tls/.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/h_tls_dlopen.so.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/libh_tls_dynamic.so.1.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dlopen_test.debug
OLD_FILES+=usr/tests/lib/libc/tls/.debug/tls_dynamic_test.debug
OLD_DIRS+=usr/tests/lib/libc/ttyio/.debug
OLD_FILES+=usr/tests/lib/libc/ttyio/.debug/ttyio_test.debug
OLD_DIRS+=usr/tests/lib/libcrypt/.debug
OLD_FILES+=usr/tests/lib/libcrypt/.debug/crypt_tests.debug
OLD_DIRS+=usr/tests/lib/libmp/.debug
OLD_FILES+=usr/tests/lib/libmp/.debug/legacy_test.debug
OLD_DIRS+=usr/tests/lib/libnv/.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/dnv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_array_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nv_tests.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_add_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_exists_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_free_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_get_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_move_test.debug
OLD_FILES+=usr/tests/lib/libnv/.debug/nvlist_send_recv_test.debug
OLD_DIRS+=usr/tests/lib/libpam/.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_ctype.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readlinev.debug
OLD_FILES+=usr/tests/lib/libpam/.debug/t_openpam_readword.debug
OLD_DIRS+=usr/tests/lib/libproc/.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/proc_test.debug
OLD_FILES+=usr/tests/lib/libproc/.debug/target_prog.debug
OLD_DIRS+=usr/tests/lib/librt/.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sched_test.debug
OLD_FILES+=usr/tests/lib/librt/.debug/sem_test.debug
OLD_DIRS+=usr/tests/lib/libthr/.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/barrier_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/cond_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/condwait_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/detach_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/equal_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fork_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/fpu_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_atexit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_cancel.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_exit.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/h_resolv.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/join_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/kill_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/mutex_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/once_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/preempt_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/rwlock_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sem_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/siglongjmp_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigmask_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sigsuspend_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/sleep_test.debug
OLD_FILES+=usr/tests/lib/libthr/.debug/swapcontext_test.debug
OLD_DIRS+=usr/tests/lib/libthr/dlopen/.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/dlopen_test.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/h_pthread_dlopen.so.1.debug
OLD_FILES+=usr/tests/lib/libthr/dlopen/.debug/main_pthread_create_test.debug
OLD_DIRS+=usr/tests/lib/libutil/.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/flopen_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/grp_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/humanize_number_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/pidfile_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain-nodomain_test.debug
OLD_FILES+=usr/tests/lib/libutil/.debug/trimdomain_test.debug
OLD_DIRS+=usr/tests/lib/libxo/.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/libenc_test.so.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_01.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_02.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_03.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_04.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_05.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_06.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_07.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_08.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_09.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_10.debug
OLD_FILES+=usr/tests/lib/libxo/.debug/test_11.debug
OLD_DIRS+=usr/tests/lib/msun/.debug
OLD_FILES+=usr/tests/lib/msun/.debug/acos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/asin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/atan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cbrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ceil_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cos_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/cosh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/erf_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/exp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/fmod_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/infinity_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/ldexp_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/log_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/pow_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/precision_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/round_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/scalbn_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sin_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sinh_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/sqrt_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tan_test.debug
OLD_FILES+=usr/tests/lib/msun/.debug/tanh_test.debug
OLD_DIRS+=usr/tests/libexec/rtld-elf/.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/ld_library_pathfds.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/libpythagoras.so.0.debug
OLD_FILES+=usr/tests/libexec/rtld-elf/.debug/target.debug
OLD_DIRS+=usr/tests/sbin/devd/.debug
OLD_FILES+=usr/tests/sbin/devd/.debug/client_test.debug
OLD_DIRS+=usr/tests/sbin/dhclient/.debug
OLD_FILES+=usr/tests/sbin/dhclient/.debug/option-domain-search_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/atf/.debug
OLD_FILES+=usr/tests/share/examples/tests/atf/.debug/printf_test.debug
OLD_DIRS+=usr/tests/share/examples/tests/plain/.debug
OLD_FILES+=usr/tests/share/examples/tests/plain/.debug/printf_test.debug
OLD_DIRS+=usr/tests/sys/aio/.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_kqueue_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/aio_test.debug
OLD_FILES+=usr/tests/sys/aio/.debug/lio_kqueue_test.debug
OLD_DIRS+=usr/tests/sys/fifo/.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_create.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_io.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_misc.debug
OLD_FILES+=usr/tests/sys/fifo/.debug/fifo_open.debug
OLD_DIRS+=usr/tests/sys/file/.debug
OLD_FILES+=usr/tests/sys/file/.debug/closefrom_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/dup_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/fcntlflags_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/flock_helper.debug
OLD_FILES+=usr/tests/sys/file/.debug/ftruncate_test.debug
OLD_FILES+=usr/tests/sys/file/.debug/newfileops_on_fork_test.debug
OLD_DIRS+=usr/tests/sys/kern/.debug
OLD_FILES+=usr/tests/sys/kern/.debug/kern_descrip_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/ptrace_test.debug
OLD_FILES+=usr/tests/sys/kern/.debug/unix_seqpacket_test.debug
OLD_DIRS+=usr/tests/sys/kern/execve/.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/execve_helper.debug
OLD_FILES+=usr/tests/sys/kern/execve/.debug/good_aout.debug
OLD_DIRS+=usr/tests/sys/kqueue/.debug
OLD_FILES+=usr/tests/sys/kqueue/.debug/kqtest.debug
OLD_DIRS+=usr/tests/sys/mqueue/.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest1.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest2.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest3.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest4.debug
OLD_FILES+=usr/tests/sys/mqueue/.debug/mqtest5.debug
OLD_DIRS+=usr/tests/sys/netinet/.debug
OLD_FILES+=usr/tests/sys/netinet/.debug/udp_dontroute.debug
OLD_DIRS+=usr/tests/sys/pjdfstest/.debug
OLD_FILES+=usr/tests/sys/pjdfstest/.debug/pjdfstest.debug
OLD_DIRS+=usr/tests/sys/vm/.debug
OLD_FILES+=usr/tests/sys/vm/.debug/mmap_test.debug
# 20151015: Rename files due to file-installed-as-dir bug
OLD_FILES+=usr/share/doc/legal/realtek
OLD_FILES+=usr/share/doc/legal/realtek/LICENSE
OLD_DIRS+=usr/share/doc/legal/realtek
OLD_DIRS+=usr/share/doc/legal/intel_ipw
OLD_FILES+=usr/share/doc/legal/intel_ipw/LICENSE
OLD_FILES+=usr/share/doc/legal/intel_iwn
OLD_FILES+=usr/share/doc/legal/intel_iwn/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_iwn
OLD_DIRS+=usr/share/doc/legal/intel_iwi
OLD_FILES+=usr/share/doc/legal/intel_iwi/LICENSE
OLD_DIRS+=usr/share/doc/legal/intel_wpi
OLD_FILES+=usr/share/doc/legal/intel_wpi/LICENSE
# 20151006: new libc++ import
OLD_FILES+=usr/include/c++/__tuple_03
OLD_FILES+=usr/include/c++/v1/__tuple_03
OLD_FILES+=usr/include/c++/v1/tr1/__tuple_03
# 20151006: new clang import which bumps version from 3.6.1 to 3.7.0.
OLD_FILES+=usr/lib/clang/3.6.1/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.1/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.1/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.1/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.1/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.1/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.1/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.1/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.1/include
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.1/lib
OLD_DIRS+=usr/lib/clang/3.6.1
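# Note: a per-version block like the one above can be drafted mechanically
# from an installed world before updating, e.g. (the version number here is
# only an example):
# find /usr/lib/clang/3.6.1 -type f | sed -e 's|^/||' -e 's|^|OLD_FILES+=|'
# find /usr/lib/clang/3.6.1 -type d | sort -r | sed -e 's|^/||' -e 's|^|OLD_DIRS+=|'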
# 20150928: unused sgsmsg utility is removed
OLD_FILES+=usr/bin/sgsmsg
# 20150926: remove links to removed/unimplemented mbuf(9) macros
OLD_FILES+=usr/share/man/man9/MEXT_ADD_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXTFREE.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_IS_REF.9.gz
OLD_FILES+=usr/share/man/man9/MEXT_REM_REF.9.gz
OLD_FILES+=usr/share/man/man9/MFREE.9.gz
# 20150818: *allocm() are gone in jemalloc 4.0.0
OLD_FILES+=usr/share/man/man3/allocm.3.gz
OLD_FILES+=usr/share/man/man3/dallocm.3.gz
OLD_FILES+=usr/share/man/man3/nallocm.3.gz
OLD_FILES+=usr/share/man/man3/rallocm.3.gz
OLD_FILES+=usr/share/man/man3/sallocm.3.gz
# 20150802: Remove NetBSD's test for pw(8)
OLD_FILES+=usr/tests/usr.sbin/pw/pw_test
# 20150719: Remove libarchive.pc
OLD_FILES+=usr/libdata/pkgconfig/libarchive.pc
# 20150705: Rename DTrace provider man pages.
OLD_FILES+=usr/share/man/man4/dtrace-io.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-ip.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-proc.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-sched.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-tcp.4.gz
OLD_FILES+=usr/share/man/man4/dtrace-udp.4.gz
# 20150704: nvlist private headers no longer installed
OLD_FILES+=usr/include/sys/nv_impl.h
OLD_FILES+=usr/include/sys/nvlist_impl.h
OLD_FILES+=usr/include/sys/nvpair_impl.h
# 20150624: libugidfw version bump
OLD_LIBS+=usr/lib/libugidfw.so.4
OLD_LIBS+=usr/lib32/libugidfw.so.4
# 20150604: Move nvlist man pages to section 9.
OLD_FILES+=usr/share/man/man3/libnv.3.gz
OLD_FILES+=usr/share/man/man3/nv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_add_stringv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_clone.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_create.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_destroy.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_dump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_empty.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_exists_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_fdump.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_flags.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_free_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_parent.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_get_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_move_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_next.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_pack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_recv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_send.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_set_error.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_size.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_take_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_unpack.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_xfer.3.gz
# 20150702: Remove duplicated nvlist includes.
OLD_FILES+=usr/include/dnv.h
OLD_FILES+=usr/include/nv.h
# 20150528: PCI IOV device driver methods moved to a separate kobj interface.
OLD_FILES+=usr/share/man/man9/PCI_ADD_VF.9.gz
OLD_FILES+=usr/share/man/man9/PCI_INIT_IOV.9.gz
OLD_FILES+=usr/share/man/man9/PCI_UNINIT_IOV.9.gz
# 20150525: new clang import which bumps version from 3.6.0 to 3.6.1.
OLD_FILES+=usr/lib/clang/3.6.0/include/__stddef_max_align_t.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_aes.h
OLD_FILES+=usr/lib/clang/3.6.0/include/__wmmintrin_pclmul.h
OLD_FILES+=usr/lib/clang/3.6.0/include/adxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/altivec.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ammintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_acle.h
OLD_FILES+=usr/lib/clang/3.6.0/include/arm_neon.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512bwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512erintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512fintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlbwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avx512vlintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/avxintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmi2intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/bmiintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/cpuid.h
OLD_FILES+=usr/lib/clang/3.6.0/include/emmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/f16cintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fma4intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/fmaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/ia32intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/immintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/lzcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm3dnow.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mm_malloc.h
OLD_FILES+=usr/lib/clang/3.6.0/include/mmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/module.modulemap
OLD_FILES+=usr/lib/clang/3.6.0/include/nmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/pmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/popcntintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/prfchwintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rdseedintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/rtmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/shaintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/smmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tbmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/tmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/wmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/x86intrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xmmintrin.h
OLD_FILES+=usr/lib/clang/3.6.0/include/xopintrin.h
OLD_DIRS+=usr/lib/clang/3.6.0/include
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.6.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.6.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.6.0/lib
OLD_DIRS+=usr/lib/clang/3.6.0
# 20150521: demandoc(1) and the mandoc(3) man pages removed
OLD_FILES+=usr/bin/demandoc
OLD_FILES+=usr/share/man/man1/demandoc.1.gz
OLD_FILES+=usr/share/man/man3/mandoc.3.gz
OLD_FILES+=usr/share/man/man3/mandoc_headers.3.gz
# 20150520: libheimsqlite removed
OLD_FILES+=usr/lib/libheimsqlite.a
OLD_FILES+=usr/lib/libheimsqlite.so
OLD_LIBS+=usr/lib/libheimsqlite.so.11
OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib32/libheimsqlite.a
OLD_FILES+=usr/lib32/libheimsqlite.so
OLD_LIBS+=usr/lib32/libheimsqlite.so.11
OLD_FILES+=usr/lib32/libheimsqlite_p.a
# 20150518: tzdata2015c update
OLD_FILES+=usr/share/zoneinfo/America/Montreal
# 20150506: NDHASGIANT(9) man page removed
OLD_FILES+=usr/share/man/man9/NDHASGIANT.9.gz
# 20150504: private libraries moved out of /usr/lib/private
OLD_FILES+=usr/share/examples/etc/libmap32.conf
OLD_FILES+=usr/include/bsdstat.h
OLD_LIBS+=usr/lib32/private/libatf-c++.so.2
OLD_LIBS+=usr/lib32/private/libbsdstat.so.1
OLD_LIBS+=usr/lib32/private/libheimipcs.so.11
OLD_LIBS+=usr/lib32/private/libsqlite3.so.0
OLD_LIBS+=usr/lib32/private/libunbound.so.5
OLD_LIBS+=usr/lib32/private/libatf-c.so.1
OLD_LIBS+=usr/lib32/private/libheimipcc.so.11
OLD_LIBS+=usr/lib32/private/libldns.so.5
OLD_LIBS+=usr/lib32/private/libssh.so.5
OLD_LIBS+=usr/lib32/private/libucl.so.1
OLD_DIRS+=usr/lib32/private
OLD_LIBS+=usr/lib/private/libatf-c++.so.2
OLD_LIBS+=usr/lib/private/libbsdstat.so.1
OLD_LIBS+=usr/lib/private/libheimipcs.so.11
OLD_LIBS+=usr/lib/private/libsqlite3.so.0
OLD_LIBS+=usr/lib/private/libunbound.so.5
OLD_LIBS+=usr/lib/private/libatf-c.so.1
OLD_LIBS+=usr/lib/private/libheimipcc.so.11
OLD_LIBS+=usr/lib/private/libldns.so.5
OLD_LIBS+=usr/lib/private/libssh.so.5
OLD_LIBS+=usr/lib/private/libucl.so.1
OLD_DIRS+=usr/lib/private
# 20150501: soeliminate renamed to soelim
OLD_FILES+=usr/bin/soeliminate
OLD_FILES+=usr/share/man/man1/soeliminate.1.gz
# 20150501: Remove the nvlist_.*[vf] functions manpages.
OLD_FILES+=usr/share/man/man3/nvlist_addf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_addv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsf_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_existsv_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freef_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_null.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_freev_type.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getf_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_getv_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_movev_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takef_string.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_binary.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_bool.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_number.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/nvlist_takev_string.3.gz
# 20150429: remove never-written documentation
OLD_FILES+=usr/share/doc/papers/hwpmc.ascii.gz
# 20150427: tests/sys/kern/mmap_test moved to tests/sys/vm/mmap_test
OLD_FILES+=usr/tests/sys/kern/mmap_test
# 20150422: zlib.c moved from net to libkern
OLD_FILES+=usr/include/net/zlib.h
OLD_FILES+=usr/include/net/zutil.h
# 20150418
OLD_FILES+=sbin/mount_oldnfs
OLD_FILES+=usr/share/man/man8/mount_oldnfs.8.gz
# 20150416: ALTQ moved to net/altq
OLD_FILES+=usr/include/altq/altq_rmclass_debug.h
OLD_FILES+=usr/include/altq/altq.h
OLD_FILES+=usr/include/altq/altq_cdnr.h
OLD_FILES+=usr/include/altq/altq_hfsc.h
OLD_FILES+=usr/include/altq/altq_priq.h
OLD_FILES+=usr/include/altq/altqconf.h
OLD_FILES+=usr/include/altq/altq_classq.h
OLD_FILES+=usr/include/altq/altq_red.h
OLD_FILES+=usr/include/altq/if_altq.h
OLD_FILES+=usr/include/altq/altq_var.h
OLD_FILES+=usr/include/altq/altq_rmclass.h
OLD_FILES+=usr/include/altq/altq_cbq.h
OLD_FILES+=usr/include/altq/altq_rio.h
OLD_DIRS+=usr/include/altq
# 20150330: ntp 4.2.8p1
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/man/man1/sntp.1.gz
# 20150329: bootconfig.h removed on arm
.if ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/bootconfig.h
.endif
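# Entries that only ever existed on some architectures are guarded with
# make(1) conditionals, as above, so that check-old does not report paths
# which were never installed for the TARGET_ARCH being processed.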
# 20150326: pmcstudy(1) man page removed
OLD_FILES+=usr/share/man/man1/pmcstudy.1.gz
# 20150315: new clang import which bumps version from 3.5.1 to 3.6.0.
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.1/altivec.h
OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.1/cpuid.h
OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.1/immintrin.h
OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/module.modulemap
OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.1
OLD_DIRS+=usr/include/clang
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.1/lib
OLD_DIRS+=usr/lib/clang/3.5.1
# 20150302: binutils documentation now distributed as man pages
OLD_FILES+=usr/share/doc/binutils/as.txt
OLD_FILES+=usr/share/doc/binutils/ld.txt
OLD_DIRS+=usr/share/doc/binutils
# 20150222: Removed bcd(6) and ppt(6)
OLD_FILES+=usr/bin/bcd
OLD_FILES+=usr/bin/ppt
OLD_FILES+=usr/share/man/man6/bcd.6.gz
OLD_FILES+=usr/share/man/man6/ppt.6.gz
# 20150217: Removed remnants of ar(4) driver
OLD_FILES+=usr/include/dev/ic/hd64570.h
# 20150212: /usr/games moved into /usr/bin
OLD_FILES+=usr/games/bcd
OLD_FILES+=usr/games/caesar
OLD_FILES+=usr/games/factor
OLD_FILES+=usr/games/fortune
OLD_FILES+=usr/games/grdc
OLD_FILES+=usr/games/morse
OLD_FILES+=usr/games/number
OLD_FILES+=usr/games/pom
OLD_FILES+=usr/games/ppt
OLD_FILES+=usr/games/primes
OLD_FILES+=usr/games/random
OLD_FILES+=usr/games/rot13
OLD_FILES+=usr/games/strfile
OLD_FILES+=usr/games/unstr
OLD_DIRS+=usr/games
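# Only the old /usr/games locations are removed; the programs themselves
# now live in /usr/bin. An illustrative post-update check:
# which fortune    # should print /usr/bin/fortune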
# 20150209: stray liblzma header removed
OLD_FILES+=usr/include/lzma/lzma.h
# 20150124: spl(9) and related man pages removed
OLD_FILES+=usr/share/man/man9/spl.9.gz
OLD_FILES+=usr/share/man/man9/spl0.9.gz
OLD_FILES+=usr/share/man/man9/splbio.9.gz
OLD_FILES+=usr/share/man/man9/splclock.9.gz
OLD_FILES+=usr/share/man/man9/splhigh.9.gz
OLD_FILES+=usr/share/man/man9/splimp.9.gz
OLD_FILES+=usr/share/man/man9/splnet.9.gz
OLD_FILES+=usr/share/man/man9/splsoftclock.9.gz
OLD_FILES+=usr/share/man/man9/splsofttty.9.gz
OLD_FILES+=usr/share/man/man9/splstatclock.9.gz
OLD_FILES+=usr/share/man/man9/spltty.9.gz
OLD_FILES+=usr/share/man/man9/splvm.9.gz
OLD_FILES+=usr/share/man/man9/splx.9.gz
# 20150118: toeplitz.c moved from netinet to net
OLD_FILES+=usr/include/netinet/toeplitz.h
# 20150118: new clang import which bumps version from 3.5.0 to 3.5.1.
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.0/altivec.h
OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.0/cpuid.h
OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.0/immintrin.h
OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/module.modulemap
OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.0
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.0/lib
OLD_DIRS+=usr/lib/clang/3.5.0
# 20150102: removal of asr(4)
OLD_FILES+=usr/share/man/man4/asr.4.gz
# 20150102: removal of texinfo
OLD_FILES+=usr/bin/info
OLD_FILES+=usr/bin/infokey
OLD_FILES+=usr/bin/install-info
OLD_FILES+=usr/bin/makeinfo
OLD_FILES+=usr/bin/texindex
OLD_FILES+=usr/share/info/am-utils.info.gz
OLD_FILES+=usr/share/info/as.info.gz
OLD_FILES+=usr/share/info/binutils.info.gz
OLD_FILES+=usr/share/info/com_err.info.gz
OLD_FILES+=usr/share/info/cpp.info.gz
OLD_FILES+=usr/share/info/cppinternals.info.gz
OLD_FILES+=usr/share/info/diff.info.gz
OLD_FILES+=usr/share/info/dir
OLD_FILES+=usr/share/info/gcc.info.gz
OLD_FILES+=usr/share/info/gccint.info.gz
OLD_FILES+=usr/share/info/gdb.info.gz
OLD_FILES+=usr/share/info/gdbint.info.gz
OLD_FILES+=usr/share/info/gperf.info.gz
OLD_FILES+=usr/share/info/grep.info.gz
OLD_FILES+=usr/share/info/groff.info.gz
OLD_FILES+=usr/share/info/heimdal.info.gz
OLD_FILES+=usr/share/info/history.info.gz
OLD_FILES+=usr/share/info/info-stnd.info.gz
OLD_FILES+=usr/share/info/info.info.gz
OLD_FILES+=usr/share/info/ld.info.gz
OLD_FILES+=usr/share/info/regex.info.gz
OLD_FILES+=usr/share/info/rluserman.info.gz
OLD_FILES+=usr/share/info/stabs.info.gz
OLD_FILES+=usr/share/info/texinfo.info.gz
OLD_FILES+=usr/share/man/man1/info.1.gz
OLD_FILES+=usr/share/man/man1/infokey.1.gz
OLD_FILES+=usr/share/man/man1/install-info.1.gz
OLD_FILES+=usr/share/man/man1/makeinfo.1.gz
OLD_FILES+=usr/share/man/man1/texindex.1.gz
OLD_FILES+=usr/share/man/man5/info.5.gz
OLD_FILES+=usr/share/man/man5/texinfo.5.gz
OLD_DIRS+=usr/share/info
# 20141231: new clang import which bumps version from 3.4.1 to 3.5.0.
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4.1/altivec.h
OLD_FILES+=usr/include/clang/3.4.1/ammintrin.h
OLD_FILES+=usr/include/clang/3.4.1/arm_neon.h
OLD_FILES+=usr/include/clang/3.4.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4.1/cpuid.h
OLD_FILES+=usr/include/clang/3.4.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4.1/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/immintrin.h
OLD_FILES+=usr/include/clang/3.4.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/module.map
OLD_FILES+=usr/include/clang/3.4.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4.1/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4.1/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/shaintrin.h
OLD_FILES+=usr/include/clang/3.4.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.4.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.4.1
# 20141225: Remove gpib/ieee488
OLD_FILES+=usr/include/dev/ieee488/ibfoo_int.h
OLD_FILES+=usr/include/dev/ieee488/tnt4882.h
OLD_FILES+=usr/include/dev/ieee488/ugpib.h
OLD_FILES+=usr/include/dev/ieee488/upd7210.h
OLD_DIRS+=usr/include/dev/ieee488
OLD_FILES+=usr/include/gpib/gpib.h
OLD_DIRS+=usr/include/gpib
OLD_FILES+=usr/lib/libgpib.a
OLD_FILES+=usr/lib/libgpib_p.a
OLD_FILES+=usr/lib/libgpib.so
OLD_LIBS+=usr/lib/libgpib.so.3
OLD_FILES+=usr/lib32/libgpib.a
OLD_FILES+=usr/lib32/libgpib_p.a
OLD_FILES+=usr/lib32/libgpib.so
OLD_LIBS+=usr/lib32/libgpib.so.3
OLD_FILES+=usr/share/man/man3/gpib.3.gz
OLD_FILES+=usr/share/man/man3/ibclr.3.gz
OLD_FILES+=usr/share/man/man3/ibdev.3.gz
OLD_FILES+=usr/share/man/man3/ibdma.3.gz
OLD_FILES+=usr/share/man/man3/ibeos.3.gz
OLD_FILES+=usr/share/man/man3/ibeot.3.gz
OLD_FILES+=usr/share/man/man3/ibloc.3.gz
OLD_FILES+=usr/share/man/man3/ibonl.3.gz
OLD_FILES+=usr/share/man/man3/ibpad.3.gz
OLD_FILES+=usr/share/man/man3/ibrd.3.gz
OLD_FILES+=usr/share/man/man3/ibsad.3.gz
OLD_FILES+=usr/share/man/man3/ibsic.3.gz
OLD_FILES+=usr/share/man/man3/ibtmo.3.gz
OLD_FILES+=usr/share/man/man3/ibtrg.3.gz
OLD_FILES+=usr/share/man/man3/ibwrt.3.gz
OLD_FILES+=usr/share/man/man4/gpib.4.gz
OLD_FILES+=usr/share/man/man4/pcii.4.gz
OLD_FILES+=usr/share/man/man4/tnt4882.4.gz
# 20141224: libxo moved to /lib
OLD_LIBS+=usr/lib/libxo.so.0
# 20141223: remove in6_gif.h, in_gif.h and if_stf.h
OLD_FILES+=usr/include/net/if_stf.h
OLD_FILES+=usr/include/netinet/in_gif.h
OLD_FILES+=usr/include/netinet6/in6_gif.h
# 20141209: pw tests broken into a file per command
OLD_FILES+=usr/tests/usr.sbin/pw/pw_delete
OLD_FILES+=usr/tests/usr.sbin/pw/pw_modify
# 20141202: update to mandoc CVS 20141201
OLD_FILES+=usr/bin/preconv
OLD_FILES+=usr/share/man/man1/preconv.1.gz
# 20141129: mrouted rc.d scripts removed from base
OLD_FILES+=etc/rc.d/mrouted
# 20141126: convert sbin/mdconfig/tests to ATF format tests
OLD_FILES+=usr/tests/sbin/mdconfig/legacy_test
OLD_FILES+=usr/tests/sbin/mdconfig/mdconfig.test
OLD_FILES+=usr/tests/sbin/mdconfig/run.pl
# 20141126: remove xform_ipip decapsulation fallback
OLD_FILES+=usr/include/netipsec/ipip_var.h
# 20141122: mandoc updated to 1.13.1
OLD_FILES+=usr/share/mdocml/external.png
# 20141111: SF_KQUEUE code removed
OLD_FILES+=usr/include/sys/sf_base.h
OLD_FILES+=usr/include/sys/sf_sync.h
# 20141109: faith/faithd removal
OLD_FILES+=etc/rc.d/faith
OLD_FILES+=usr/share/man/man4/faith.4.gz
OLD_FILES+=usr/share/man/man4/if_faith.4.gz
OLD_FILES+=usr/sbin/faithd
OLD_FILES+=usr/share/man/man8/faithd.8.gz
# 20141107: overhaul if_gre(4)
OLD_FILES+=usr/include/netinet/ip_gre.h
# 20141102: postrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/postrandom
# 20141031: initrandom obsoleted by new /dev/random code
OLD_FILES+=etc/rc.d/initrandom
# 20141030: atf 0.21 import
OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz
# 20141028: debug files accidentally installed as directory names
OLD_FILES+=usr/lib/debug/usr/lib/i18n
OLD_FILES+=usr/lib/debug/usr/lib/private
OLD_FILES+=usr/lib/debug/usr/lib32/i18n
OLD_FILES+=usr/lib/debug/usr/lib32/private
# 20141015: OpenSSL 1.0.1j import
OLD_FILES+=usr/share/openssl/man/man3/CMS_sign_add1_signer.3.gz
# 20141003: libproc version bump
OLD_LIBS+=usr/lib/libproc.so.2
OLD_LIBS+=usr/lib32/libproc.so.2
# 20140922: sleepq_calc_signal_retval.9 and sleepq_catch_signals.9 removed
OLD_FILES+=usr/share/man/man9/sleepq_calc_signal_retval.9.gz
OLD_FILES+=usr/share/man/man9/sleepq_catch_signals.9.gz
# 20140917: hv_kvpd rc.d script removed in favor of devd configuration
OLD_FILES+=etc/rc.d/hv_kvpd
# 20140917: libnv was accidentally being installed to /usr/lib instead of /lib
OLD_LIBS+=usr/lib/libnv.so.0
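# The misplaced copy is listed under OLD_LIBS rather than OLD_FILES, so it
# is only removed by the separate delete-old-libs target; binaries still
# linked against it keep working until that target is run.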
# 20140829: rc.d/kerberos removed
OLD_FILES+=etc/rc.d/kerberos
# 20140827: tzdata2014f import
OLD_FILES+=usr/share/zoneinfo/Asia/Chongqing
OLD_FILES+=usr/share/zoneinfo/Asia/Harbin
OLD_FILES+=usr/share/zoneinfo/Asia/Kashgar
# 20140814: libopie version bump
OLD_LIBS+=usr/lib/libopie.so.7
OLD_LIBS+=usr/lib32/libopie.so.7
# 20140811: otp-sha renamed to otp-sha1
OLD_FILES+=usr/bin/otp-sha
OLD_FILES+=usr/share/man/man1/otp-sha.1.gz
# 20140807: Remove private lib files that should not be installed.
OLD_FILES+=usr/lib32/private/libatf-c.a
OLD_FILES+=usr/lib32/private/libatf-c.so
OLD_FILES+=usr/lib32/private/libatf-c_p.a
OLD_FILES+=usr/lib32/private/libatf-c++.a
OLD_FILES+=usr/lib32/private/libatf-c++.so
OLD_FILES+=usr/lib32/private/libatf-c++_p.a
OLD_FILES+=usr/lib32/private/libbsdstat.a
OLD_FILES+=usr/lib32/private/libbsdstat.so
OLD_FILES+=usr/lib32/private/libbsdstat_p.a
OLD_FILES+=usr/lib32/private/libheimipcc.a
OLD_FILES+=usr/lib32/private/libheimipcc.so
OLD_FILES+=usr/lib32/private/libheimipcc_p.a
OLD_FILES+=usr/lib32/private/libheimipcs.a
OLD_FILES+=usr/lib32/private/libheimipcs.so
OLD_FILES+=usr/lib32/private/libheimipcs_p.a
OLD_FILES+=usr/lib32/private/libldns.a
OLD_FILES+=usr/lib32/private/libldns.so
OLD_FILES+=usr/lib32/private/libldns_p.a
OLD_FILES+=usr/lib32/private/libssh.a
OLD_FILES+=usr/lib32/private/libssh.so
OLD_FILES+=usr/lib32/private/libssh_p.a
OLD_FILES+=usr/lib32/private/libunbound.a
OLD_FILES+=usr/lib32/private/libunbound.so
OLD_FILES+=usr/lib32/private/libunbound_p.a
OLD_FILES+=usr/lib32/private/libucl.a
OLD_FILES+=usr/lib32/private/libucl.so
OLD_FILES+=usr/lib32/private/libucl_p.a
OLD_FILES+=usr/lib/private/libatf-c.a
OLD_FILES+=usr/lib/private/libatf-c.so
OLD_FILES+=usr/lib/private/libatf-c_p.a
OLD_FILES+=usr/lib/private/libatf-c++.a
OLD_FILES+=usr/lib/private/libatf-c++.so
OLD_FILES+=usr/lib/private/libatf-c++_p.a
OLD_FILES+=usr/lib/private/libbsdstat.a
OLD_FILES+=usr/lib/private/libbsdstat.so
OLD_FILES+=usr/lib/private/libbsdstat_p.a
OLD_FILES+=usr/lib/private/libheimipcc.a
OLD_FILES+=usr/lib/private/libheimipcc.so
OLD_FILES+=usr/lib/private/libheimipcc_p.a
OLD_FILES+=usr/lib/private/libheimipcs.a
OLD_FILES+=usr/lib/private/libheimipcs.so
OLD_FILES+=usr/lib/private/libheimipcs_p.a
OLD_FILES+=usr/lib/private/libldns.a
OLD_FILES+=usr/lib/private/libldns.so
OLD_FILES+=usr/lib/private/libldns_p.a
OLD_FILES+=usr/lib/private/libssh.a
OLD_FILES+=usr/lib/private/libssh.so
OLD_FILES+=usr/lib/private/libssh_p.a
OLD_FILES+=usr/lib/private/libunbound.a
OLD_FILES+=usr/lib/private/libunbound.so
OLD_FILES+=usr/lib/private/libunbound_p.a
OLD_FILES+=usr/lib/private/libucl.a
OLD_FILES+=usr/lib/private/libucl.so
OLD_FILES+=usr/lib/private/libucl_p.a
# 20140803: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_change_wiring.9.gz
# 20140731: SYSCTL_ADD_OID(9) man page removed
OLD_FILES+=usr/share/man/man9/SYSCTL_ADD_OID.9.gz
# 20140728: libsbuf restored to old version.
OLD_LIBS+=lib/libsbuf.so.7
OLD_LIBS+=usr/lib32/libsbuf.so.7
# 20140728: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/VOP_GETVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_CREATEVOBJECT.9.gz
OLD_FILES+=usr/share/man/man9/VOP_DESTROYVOBJECT.9.gz
# 20140723: renamed to PCBGROUP.9
OLD_FILES+=usr/share/man/man9/PCBGROUPS.9.gz
# 20140722: browse_packages_ftp.sh removed
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages_ftp.sh
# 20140718: Remove obsolete man pages
OLD_FILES+=usr/share/man/man9/zero_copy.9.gz
OLD_FILES+=usr/share/man/man9/zero_copy_sockets.9.gz
# 20140718: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_page_protect.9.gz
# 20140717: Remove an obsolete man page
OLD_FILES+=usr/share/man/man9/pmap_clear_reference.9.gz
# 20140716: Remove an incorrectly named man page
OLD_FILES+=usr/share/man/man9/pmap_ts_modified.9.gz
# 20140712: Removal of bsd.dtrace.mk
OLD_FILES+=usr/share/mk/bsd.dtrace.mk
# 20140705: turn libreadline into an internal lib
OLD_LIBS+=lib/libreadline.so.8
OLD_FILES+=usr/lib/libreadline.a
OLD_FILES+=usr/lib/libreadline_p.a
OLD_FILES+=usr/lib/libreadline.so
OLD_FILES+=usr/lib/libhistory.a
OLD_FILES+=usr/lib/libhistory_p.a
OLD_FILES+=usr/lib/libhistory.so
OLD_LIBS+=usr/lib/libhistory.so.8
OLD_FILES+=usr/lib32/libhistory.a
OLD_FILES+=usr/lib32/libhistory.so
OLD_LIBS+=usr/lib32/libhistory.so.8
OLD_FILES+=usr/lib32/libhistory_p.a
OLD_FILES+=usr/lib32/libreadline.a
OLD_FILES+=usr/lib32/libreadline.so
OLD_LIBS+=usr/lib32/libreadline.so.8
OLD_FILES+=usr/lib32/libreadline_p.a
OLD_FILES+=usr/include/readline/chardefs.h
OLD_FILES+=usr/include/readline/history.h
OLD_FILES+=usr/include/readline/keymaps.h
OLD_FILES+=usr/include/readline/readline.h
OLD_FILES+=usr/include/readline/tilde.h
OLD_FILES+=usr/include/readline/rlconf.h
OLD_FILES+=usr/include/readline/rlstdc.h
OLD_FILES+=usr/include/readline/rltypedefs.h
OLD_DIRS+=usr/include/readline
OLD_FILES+=usr/share/info/readline.info.gz
OLD_FILES+=usr/share/man/man3/readline.3.gz
OLD_FILES+=usr/share/man/man3/rlhistory.3.gz
# 20140625: csup removal
OLD_FILES+=usr/bin/csup
OLD_FILES+=usr/bin/cpasswd
OLD_FILES+=usr/share/man/man1/csup.1.gz
OLD_FILES+=usr/share/man/man1/cpasswd.1.gz
OLD_FILES+=usr/share/examples/cvsup/README
OLD_FILES+=usr/share/examples/cvsup/cvs-supfile
OLD_FILES+=usr/share/examples/cvsup/stable-supfile
OLD_FILES+=usr/share/examples/cvsup/standard-supfile
OLD_DIRS+=usr/share/examples/cvsup
# 20140614: send-pr removal
OLD_FILES+=usr/bin/sendbug
OLD_FILES+=usr/share/info/send-pr.info.gz
OLD_FILES+=usr/share/man/man1/send-pr.1.gz
OLD_FILES+=usr/share/man/man1/sendbug.1.gz
OLD_FILES+=etc/gnats/freefall
OLD_DIRS+=etc/gnats
# 20140512: new clang import which bumps version from 3.4 to 3.4.1.
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4/altivec.h
OLD_FILES+=usr/include/clang/3.4/ammintrin.h
OLD_FILES+=usr/include/clang/3.4/avx2intrin.h
OLD_FILES+=usr/include/clang/3.4/avxintrin.h
OLD_FILES+=usr/include/clang/3.4/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.4/bmiintrin.h
OLD_FILES+=usr/include/clang/3.4/cpuid.h
OLD_FILES+=usr/include/clang/3.4/emmintrin.h
OLD_FILES+=usr/include/clang/3.4/f16cintrin.h
OLD_FILES+=usr/include/clang/3.4/fma4intrin.h
OLD_FILES+=usr/include/clang/3.4/fmaintrin.h
OLD_FILES+=usr/include/clang/3.4/immintrin.h
OLD_FILES+=usr/include/clang/3.4/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.4/mm3dnow.h
OLD_FILES+=usr/include/clang/3.4/mm_malloc.h
OLD_FILES+=usr/include/clang/3.4/mmintrin.h
OLD_FILES+=usr/include/clang/3.4/module.map
OLD_FILES+=usr/include/clang/3.4/nmmintrin.h
OLD_FILES+=usr/include/clang/3.4/pmmintrin.h
OLD_FILES+=usr/include/clang/3.4/popcntintrin.h
OLD_FILES+=usr/include/clang/3.4/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.4/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.4/rtmintrin.h
OLD_FILES+=usr/include/clang/3.4/shaintrin.h
OLD_FILES+=usr/include/clang/3.4/smmintrin.h
OLD_FILES+=usr/include/clang/3.4/tbmintrin.h
OLD_FILES+=usr/include/clang/3.4/tmmintrin.h
OLD_FILES+=usr/include/clang/3.4/wmmintrin.h
OLD_FILES+=usr/include/clang/3.4/x86intrin.h
OLD_FILES+=usr/include/clang/3.4/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4/xopintrin.h
OLD_FILES+=usr/include/clang/3.4/arm_neon.h
OLD_DIRS+=usr/include/clang/3.4
# 20140505: src.opts.mk was bogusly installed
OLD_FILES+=usr/share/mk/src.opts.mk
# 20140505: Reject PR kern/187551
OLD_FILES+=usr/tests/sbin/ifconfig/fibs_test
# 20140502: Removal of lindev(4)
OLD_FILES+=usr/share/man/man4/lindev.4.gz
# 20140425: profiled libssp and libstand removed
OLD_FILES+=usr/lib/libssp_p.a
OLD_FILES+=usr/lib/libstand_p.a
OLD_FILES+=usr/lib32/libssp_p.a
OLD_FILES+=usr/lib32/libstand_p.a
# 20140314: AppleTalk support removed
OLD_DIRS+=usr/include/netatalk
OLD_FILES+=usr/include/netatalk/aarp.h
OLD_FILES+=usr/include/netatalk/at.h
OLD_FILES+=usr/include/netatalk/at_extern.h
OLD_FILES+=usr/include/netatalk/at_var.h
OLD_FILES+=usr/include/netatalk/ddp.h
OLD_FILES+=usr/include/netatalk/ddp_pcb.h
OLD_FILES+=usr/include/netatalk/ddp_var.h
OLD_FILES+=usr/include/netatalk/endian.h
OLD_FILES+=usr/include/netatalk/phase2.h
# 20140314: Remove IPX/SPX
OLD_LIBS+=lib/libipx.so.5
OLD_FILES+=usr/include/netipx/ipx.h
OLD_FILES+=usr/include/netipx/ipx_if.h
OLD_FILES+=usr/include/netipx/ipx_pcb.h
OLD_FILES+=usr/include/netipx/ipx_var.h
OLD_FILES+=usr/include/netipx/spx.h
OLD_FILES+=usr/include/netipx/spx_debug.h
OLD_FILES+=usr/include/netipx/spx_timer.h
OLD_FILES+=usr/include/netipx/spx_var.h
OLD_DIRS+=usr/include/netipx
OLD_FILES+=usr/lib/libipx.a
OLD_FILES+=usr/lib/libipx.so
OLD_FILES+=usr/lib/libipx_p.a
OLD_FILES+=usr/lib32/libipx.a
OLD_FILES+=usr/lib32/libipx.so
OLD_LIBS+=usr/lib32/libipx.so.5
OLD_FILES+=usr/lib32/libipx_p.a
OLD_FILES+=usr/sbin/IPXrouted
OLD_FILES+=usr/share/man/man3/ipx.3.gz
OLD_FILES+=usr/share/man/man3/ipx_addr.3.gz
OLD_FILES+=usr/share/man/man3/ipx_ntoa.3.gz
OLD_FILES+=usr/share/man/man4/ef.4.gz
OLD_FILES+=usr/share/man/man4/if_ef.4.gz
OLD_FILES+=usr/share/man/man8/IPXrouted.8.gz
# 20140314: bsdconfig usermgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/userinput
# 20140307: bsdconfig groupmgmt rewrite
OLD_FILES+=usr/libexec/bsdconfig/070.usermgmt/groupinput
# 20140223: Remove libyaml
OLD_FILES+=usr/lib/private/libyaml.a
OLD_FILES+=usr/lib/private/libyaml.so
OLD_LIBS+=usr/lib/private/libyaml.so.1
OLD_FILES+=usr/lib/private/libyaml_p.a
OLD_FILES+=usr/lib32/private/libyaml.a
OLD_FILES+=usr/lib32/private/libyaml.so
OLD_LIBS+=usr/lib32/private/libyaml.so.1
OLD_FILES+=usr/lib32/private/libyaml_p.a
# 20140216: new clang import which bumps version from 3.3 to 3.4.
OLD_FILES+=usr/bin/llvm-prof
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.3/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.3/altivec.h
OLD_FILES+=usr/include/clang/3.3/ammintrin.h
OLD_FILES+=usr/include/clang/3.3/avx2intrin.h
OLD_FILES+=usr/include/clang/3.3/avxintrin.h
OLD_FILES+=usr/include/clang/3.3/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.3/bmiintrin.h
OLD_FILES+=usr/include/clang/3.3/cpuid.h
OLD_FILES+=usr/include/clang/3.3/emmintrin.h
OLD_FILES+=usr/include/clang/3.3/f16cintrin.h
OLD_FILES+=usr/include/clang/3.3/fma4intrin.h
OLD_FILES+=usr/include/clang/3.3/fmaintrin.h
OLD_FILES+=usr/include/clang/3.3/immintrin.h
OLD_FILES+=usr/include/clang/3.3/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.3/mm3dnow.h
OLD_FILES+=usr/include/clang/3.3/mm_malloc.h
OLD_FILES+=usr/include/clang/3.3/mmintrin.h
OLD_FILES+=usr/include/clang/3.3/module.map
OLD_FILES+=usr/include/clang/3.3/nmmintrin.h
OLD_FILES+=usr/include/clang/3.3/pmmintrin.h
OLD_FILES+=usr/include/clang/3.3/popcntintrin.h
OLD_FILES+=usr/include/clang/3.3/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.3/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.3/rtmintrin.h
OLD_FILES+=usr/include/clang/3.3/smmintrin.h
OLD_FILES+=usr/include/clang/3.3/tmmintrin.h
OLD_FILES+=usr/include/clang/3.3/wmmintrin.h
OLD_FILES+=usr/include/clang/3.3/x86intrin.h
OLD_FILES+=usr/include/clang/3.3/xmmintrin.h
OLD_FILES+=usr/include/clang/3.3/xopintrin.h
OLD_FILES+=usr/share/man/man1/llvm-prof.1.gz
OLD_FILES+=usr/share/man/man1/llvm-ranlib.1.gz
OLD_DIRS+=usr/include/clang/3.3
# 20140216: nve(4) removed
OLD_FILES+=usr/share/man/man4/if_nve.4.gz
OLD_FILES+=usr/share/man/man4/nve.4.gz
# 20140205: Open Firmware device moved
OLD_FILES+=usr/include/dev/ofw/ofw_nexus.h
# 20140128: libelf and libdwarf import
OLD_LIBS+=usr/lib/libelf.so.1
OLD_LIBS+=usr/lib32/libelf.so.1
OLD_LIBS+=usr/lib/libdwarf.so.3
OLD_LIBS+=usr/lib32/libdwarf.so.3
# 20140123: apicvar header moved to x86
OLD_FILES+=usr/include/machine/apicvar.h
# 20131215: libcam version bumped
OLD_LIBS+=lib/libcam.so.6
OLD_LIBS+=usr/lib32/libcam.so.6
# 20131202: libcapsicum and libcasper moved to /lib/
OLD_LIBS+=usr/lib/libcapsicum.so.0
OLD_LIBS+=usr/lib/libcasper.so.0
# 20131109: extattr(2) mlinks fixed
OLD_FILES+=usr/share/man/man2/extattr_delete_list.2.gz
OLD_FILES+=usr/share/man/man2/extattr_get_list.2.gz
# 20131107: example files removed
OLD_FILES+=usr/share/examples/libusb20/aux.c
OLD_FILES+=usr/share/examples/libusb20/aux.h
# 20131105: tzdata 2013h import
OLD_FILES+=usr/share/zoneinfo/America/Shiprock
OLD_FILES+=usr/share/zoneinfo/Antarctica/South_Pole
# 20131103: WITH_LIBICONV_COMPAT removal
OLD_FILES+=usr/include/_libiconv_compat.h
OLD_FILES+=usr/lib/libiconv.a
OLD_FILES+=usr/lib/libiconv.so
OLD_FILES+=usr/lib/libiconv.so.3
OLD_FILES+=usr/lib/libiconv_p.a
OLD_FILES+=usr/lib32/libiconv.a
OLD_FILES+=usr/lib32/libiconv.so
OLD_FILES+=usr/lib32/libiconv.so.3
OLD_FILES+=usr/lib32/libiconv_p.a
# 20131103: removal of utxrm(8), use 'utx rm' instead.
OLD_FILES+=usr/sbin/utxrm
OLD_FILES+=usr/share/man/man8/utxrm.8.gz
# 20131031: pkg_install has been removed
OLD_FILES+=etc/periodic/daily/220.backup-pkgdb
OLD_FILES+=etc/periodic/daily/490.status-pkg-changes
OLD_FILES+=etc/periodic/security/460.chkportsum
OLD_FILES+=etc/periodic/weekly/400.status-pkg
OLD_FILES+=usr/sbin/pkg_add
OLD_FILES+=usr/sbin/pkg_create
OLD_FILES+=usr/sbin/pkg_delete
OLD_FILES+=usr/sbin/pkg_info
OLD_FILES+=usr/sbin/pkg_updating
OLD_FILES+=usr/sbin/pkg_version
OLD_FILES+=usr/share/man/man1/pkg_add.1.gz
OLD_FILES+=usr/share/man/man1/pkg_create.1.gz
OLD_FILES+=usr/share/man/man1/pkg_delete.1.gz
OLD_FILES+=usr/share/man/man1/pkg_info.1.gz
OLD_FILES+=usr/share/man/man1/pkg_updating.1.gz
OLD_FILES+=usr/share/man/man1/pkg_version.1.gz
# 20131030: /etc/keys moved to /usr/share/keys
OLD_DIRS+=etc/keys
OLD_DIRS+=etc/keys/pkg
OLD_DIRS+=etc/keys/pkg/revoked
OLD_DIRS+=etc/keys/pkg/trusted
OLD_FILES+=etc/keys/pkg/trusted/pkg.freebsd.org.2013102301
# 20131028: ng_fec(4) removed
OLD_FILES+=usr/include/netgraph/ng_fec.h
OLD_FILES+=usr/share/man/man4/ng_fec.4.gz
# 20131027: header moved
OLD_FILES+=usr/include/net/pf_mtag.h
# 20131023: remove never-used iscsi examples directory
OLD_DIRS+=usr/share/examples/iscsi
# 20131021: isf(4) removed
OLD_FILES+=usr/sbin/isfctl
OLD_FILES+=usr/share/man/man4/isf.4.gz
OLD_FILES+=usr/share/man/man8/isfctl.8.gz
# 20131014: libbsdyml becomes private
OLD_FILES+=usr/lib/libbsdyml.a
OLD_FILES+=usr/lib/libbsdyml.so
OLD_LIBS+=usr/lib/libbsdyml.so.0
OLD_FILES+=usr/lib/libbsdyml_p.a
OLD_FILES+=usr/lib32/libbsdyml.a
OLD_FILES+=usr/lib32/libbsdyml.so
OLD_LIBS+=usr/lib32/libbsdyml.so.0
OLD_FILES+=usr/lib32/libbsdyml_p.a
OLD_FILES+=usr/share/man/man3/libbsdyml.3.gz
OLD_FILES+=usr/include/bsdyml.h
# 20131013: Removal of the ATF tools
OLD_FILES+=etc/atf/FreeBSD.conf
OLD_FILES+=etc/atf/atf-run.hooks
OLD_FILES+=etc/atf/common.conf
OLD_FILES+=usr/bin/atf-config
OLD_FILES+=usr/bin/atf-report
OLD_FILES+=usr/bin/atf-run
OLD_FILES+=usr/bin/atf-version
OLD_FILES+=usr/share/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/atf-run.hooks
OLD_FILES+=usr/share/examples/atf/tests-results.css
OLD_FILES+=usr/share/man/man1/atf-config.1.gz
OLD_FILES+=usr/share/man/man1/atf-report.1.gz
OLD_FILES+=usr/share/man/man1/atf-run.1.gz
OLD_FILES+=usr/share/man/man1/atf-version.1.gz
OLD_FILES+=usr/share/man/man5/atf-formats.5.gz
OLD_FILES+=usr/share/xml/atf/tests-results.dtd
OLD_FILES+=usr/share/xsl/atf/tests-results.xsl
# 20131009: freebsd-version moved from /libexec to /bin
OLD_FILES+=libexec/freebsd-version
# 20131001: ar and ranlib from binutils no longer used
OLD_FILES+=usr/bin/gnu-ar
OLD_FILES+=usr/bin/gnu-ranlib
OLD_FILES+=usr/share/man/man1/gnu-ar.1.gz
OLD_FILES+=usr/share/man/man1/gnu-ranlib.1.gz
# 20130930: BIND removed from base
OLD_FILES+=etc/mtree/BIND.chroot.dist
OLD_FILES+=etc/namedb
OLD_FILES+=etc/periodic/daily/470.status-named
OLD_FILES+=usr/bin/dig
OLD_FILES+=usr/bin/nslookup
OLD_FILES+=usr/bin/nsupdate
OLD_DIRS+=usr/include/lwres
OLD_FILES+=usr/include/lwres/context.h
OLD_FILES+=usr/include/lwres/int.h
OLD_FILES+=usr/include/lwres/ipv6.h
OLD_FILES+=usr/include/lwres/lang.h
OLD_FILES+=usr/include/lwres/list.h
OLD_FILES+=usr/include/lwres/lwbuffer.h
OLD_FILES+=usr/include/lwres/lwpacket.h
OLD_FILES+=usr/include/lwres/lwres.h
OLD_FILES+=usr/include/lwres/net.h
OLD_FILES+=usr/include/lwres/netdb.h
OLD_FILES+=usr/include/lwres/platform.h
OLD_FILES+=usr/include/lwres/result.h
OLD_FILES+=usr/include/lwres/string.h
OLD_FILES+=usr/include/lwres/version.h
OLD_FILES+=usr/lib/liblwres.a
OLD_FILES+=usr/lib/liblwres.so
OLD_LIBS+=usr/lib/liblwres.so.90
OLD_FILES+=usr/lib/liblwres_p.a
OLD_FILES+=usr/sbin/arpaname
OLD_FILES+=usr/sbin/ddns-confgen
OLD_FILES+=usr/sbin/dnssec-dsfromkey
OLD_FILES+=usr/sbin/dnssec-keyfromlabel
OLD_FILES+=usr/sbin/dnssec-keygen
OLD_FILES+=usr/sbin/dnssec-revoke
OLD_FILES+=usr/sbin/dnssec-settime
OLD_FILES+=usr/sbin/dnssec-signzone
OLD_FILES+=usr/sbin/dnssec-verify
OLD_FILES+=usr/sbin/genrandom
OLD_FILES+=usr/sbin/isc-hmac-fixup
OLD_FILES+=usr/sbin/lwresd
OLD_FILES+=usr/sbin/named
OLD_FILES+=usr/sbin/named-checkconf
OLD_FILES+=usr/sbin/named-checkzone
OLD_FILES+=usr/sbin/named-compilezone
OLD_FILES+=usr/sbin/named-journalprint
OLD_FILES+=usr/sbin/named.reconfig
OLD_FILES+=usr/sbin/named.reload
OLD_FILES+=usr/sbin/nsec3hash
OLD_FILES+=usr/sbin/rndc
OLD_FILES+=usr/sbin/rndc-confgen
OLD_DIRS+=usr/share/doc/bind9
OLD_FILES+=usr/share/doc/bind9/CHANGES
OLD_FILES+=usr/share/doc/bind9/COPYRIGHT
OLD_FILES+=usr/share/doc/bind9/FAQ
OLD_FILES+=usr/share/doc/bind9/HISTORY
OLD_FILES+=usr/share/doc/bind9/README
OLD_DIRS+=usr/share/doc/bind9/arm
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch01.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch02.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch03.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch04.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch05.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch06.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch07.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch08.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch09.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.ch10.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.html
OLD_FILES+=usr/share/doc/bind9/arm/Bv9ARM.pdf
OLD_FILES+=usr/share/doc/bind9/arm/man.arpaname.html
OLD_FILES+=usr/share/doc/bind9/arm/man.ddns-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dig.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-dsfromkey.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keyfromlabel.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-keygen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-revoke.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-settime.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-signzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.dnssec-verify.html
OLD_FILES+=usr/share/doc/bind9/arm/man.genrandom.html
OLD_FILES+=usr/share/doc/bind9/arm/man.host.html
OLD_FILES+=usr/share/doc/bind9/arm/man.isc-hmac-fixup.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkconf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-checkzone.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named-journalprint.html
OLD_FILES+=usr/share/doc/bind9/arm/man.named.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsec3hash.html
OLD_FILES+=usr/share/doc/bind9/arm/man.nsupdate.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc-confgen.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.conf.html
OLD_FILES+=usr/share/doc/bind9/arm/man.rndc.html
OLD_DIRS+=usr/share/doc/bind9/misc
OLD_FILES+=usr/share/doc/bind9/misc/dnssec
OLD_FILES+=usr/share/doc/bind9/misc/format-options.pl
OLD_FILES+=usr/share/doc/bind9/misc/ipv6
OLD_FILES+=usr/share/doc/bind9/misc/migration
OLD_FILES+=usr/share/doc/bind9/misc/migration-4to9
OLD_FILES+=usr/share/doc/bind9/misc/options
OLD_FILES+=usr/share/doc/bind9/misc/rfc-compliance
OLD_FILES+=usr/share/doc/bind9/misc/roadmap
OLD_FILES+=usr/share/doc/bind9/misc/sdb
OLD_FILES+=usr/share/doc/bind9/misc/sort-options.pl
OLD_FILES+=usr/share/man/man1/arpaname.1.gz
OLD_FILES+=usr/share/man/man1/dig.1.gz
OLD_FILES+=usr/share/man/man1/nslookup.1.gz
OLD_FILES+=usr/share/man/man1/nsupdate.1.gz
OLD_FILES+=usr/share/man/man3/lwres.3.gz
OLD_FILES+=usr/share/man/man3/lwres_addr_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_add.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_back.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_first.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_forward.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_getuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_invalidate.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint16.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint32.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_putuint8.3.gz
OLD_FILES+=usr/share/man/man3/lwres_buffer_subtract.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_clear.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_get.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_init.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_conf_print.3.gz
OLD_FILES+=usr/share/man/man3/lwres_config.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_allocmem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_create.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_destroy.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_freemem.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_initserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_nextserial.3.gz
OLD_FILES+=usr/share/man/man3/lwres_context_sendrecv.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_endhostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freeaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_freehostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabn.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnrequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gabnresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gai_strerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getaddrsbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyaddr_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname2.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostbyname_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnode.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getipnodebyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnamebyaddr.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getnameinfo.3.gz
OLD_FILES+=usr/share/man/man3/lwres_getrrsetbyname.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnba.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbarequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_gnbaresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_herror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_hstrerror.3.gz
OLD_FILES+=usr/share/man/man3/lwres_inetntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_parseheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_lwpacket_renderheader.3.gz
OLD_FILES+=usr/share/man/man3/lwres_net_ntop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noop.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_nooprequest_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_free.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_parse.3.gz
OLD_FILES+=usr/share/man/man3/lwres_noopresponse_render.3.gz
OLD_FILES+=usr/share/man/man3/lwres_packet.3.gz
OLD_FILES+=usr/share/man/man3/lwres_resutil.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent.3.gz
OLD_FILES+=usr/share/man/man3/lwres_sethostent_r.3.gz
OLD_FILES+=usr/share/man/man3/lwres_string_parse.3.gz
OLD_FILES+=usr/share/man/man5/named.conf.5.gz
OLD_FILES+=usr/share/man/man5/rndc.conf.5.gz
OLD_FILES+=usr/share/man/man8/ddns-confgen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-dsfromkey.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keyfromlabel.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-keygen.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-revoke.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-settime.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-signzone.8.gz
OLD_FILES+=usr/share/man/man8/dnssec-verify.8.gz
OLD_FILES+=usr/share/man/man8/genrandom.8.gz
OLD_FILES+=usr/share/man/man8/isc-hmac-fixup.8.gz
OLD_FILES+=usr/share/man/man8/lwresd.8.gz
OLD_FILES+=usr/share/man/man8/named-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/named-checkzone.8.gz
OLD_FILES+=usr/share/man/man8/named-compilezone.8.gz
OLD_FILES+=usr/share/man/man8/named-journalprint.8.gz
OLD_FILES+=usr/share/man/man8/named.8.gz
OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz
OLD_FILES+=usr/share/man/man8/named.reload.8.gz
OLD_FILES+=usr/share/man/man8/nsec3hash.8.gz
OLD_FILES+=usr/share/man/man8/rndc-confgen.8.gz
OLD_FILES+=usr/share/man/man8/rndc.8.gz
OLD_DIRS+=var/named/dev
OLD_DIRS+=var/named/etc
OLD_DIRS+=var/named/etc/namedb
OLD_FILES+=var/named/etc/namedb/PROTO.localhost-v6.rev
OLD_FILES+=var/named/etc/namedb/PROTO.localhost.rev
OLD_DIRS+=var/named/etc/namedb/dynamic
OLD_FILES+=var/named/etc/namedb/make-localhost
OLD_DIRS+=var/named/etc/namedb/master
OLD_FILES+=var/named/etc/namedb/master/empty.db
OLD_FILES+=var/named/etc/namedb/master/localhost-forward.db
OLD_FILES+=var/named/etc/namedb/master/localhost-reverse.db
#OLD_FILES+=var/named/etc/namedb/named.conf # intentionally left out
OLD_FILES+=var/named/etc/namedb/named.root
OLD_DIRS+=var/named/etc/namedb/working
OLD_DIRS+=var/named/etc/namedb/slave
OLD_DIRS+=var/named/var
OLD_DIRS+=var/named/var/dump
OLD_DIRS+=var/named/var/log
OLD_DIRS+=var/named/var/run
OLD_DIRS+=var/named/var/run/named
OLD_DIRS+=var/named/var/stats
OLD_DIRS+=var/run/named
# 20130923: bsdconfig example moved
OLD_FILES+=usr/share/examples/bsdconfig/browse_packages.sh
# 20130908: libssh becomes private
OLD_FILES+=usr/lib/libssh.a
OLD_FILES+=usr/lib/libssh.so
OLD_LIBS+=usr/lib/libssh.so.5
OLD_FILES+=usr/lib/libssh_p.a
OLD_FILES+=usr/lib32/libssh.a
OLD_FILES+=usr/lib32/libssh.so
OLD_LIBS+=usr/lib32/libssh.so.5
OLD_FILES+=usr/lib32/libssh_p.a
# 20130903: gnupatch is no more
OLD_FILES+=usr/bin/gnupatch
OLD_FILES+=usr/share/man/man1/gnupatch.1.gz
# 20130829: bsdpatch is now installed unconditionally as patch
OLD_FILES+=usr/bin/bsdpatch
OLD_FILES+=usr/share/man/man1/bsdpatch.1.gz
# 20130822: bind 9.9.3-P2 import
OLD_LIBS+=usr/lib/liblwres.so.80
# 20130814: vm_page_busy(9) rework; obsolete man pages removed
OLD_FILES+=usr/share/man/man9/vm_page_flash.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_finish.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_io_start.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_wakeup.9.gz
# 20130710: libkvm version bump
OLD_LIBS+=lib/libkvm.so.5
OLD_LIBS+=usr/lib32/libkvm.so.5
# 20130623: dialog update from 1.1 to 1.2
OLD_LIBS+=usr/lib/libdialog.so.7
OLD_LIBS+=usr/lib32/libdialog.so.7
# 20130616: vfs_mount.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mount.9.gz
# 20130614: remove CVS from base
OLD_FILES+=usr/bin/cvs
OLD_FILES+=usr/bin/cvsbug
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ascii.gz
OLD_FILES+=usr/share/doc/psd/28.cvs/paper.ps.gz
OLD_DIRS+=usr/share/doc/psd/28.cvs
OLD_FILES+=usr/share/examples/cvs/contrib/README
OLD_FILES+=usr/share/examples/cvs/contrib/clmerge
OLD_FILES+=usr/share/examples/cvs/contrib/cln_hist
OLD_FILES+=usr/share/examples/cvs/contrib/commit_prep
OLD_FILES+=usr/share/examples/cvs/contrib/cvs2vendor
OLD_FILES+=usr/share/examples/cvs/contrib/cvs_acls
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck
OLD_FILES+=usr/share/examples/cvs/contrib/cvscheck.man
OLD_FILES+=usr/share/examples/cvs/contrib/cvshelp.man
OLD_FILES+=usr/share/examples/cvs/contrib/descend.man
OLD_FILES+=usr/share/examples/cvs/contrib/easy-import
OLD_FILES+=usr/share/examples/cvs/contrib/intro.doc
OLD_FILES+=usr/share/examples/cvs/contrib/log
OLD_FILES+=usr/share/examples/cvs/contrib/log_accum
OLD_FILES+=usr/share/examples/cvs/contrib/mfpipe
OLD_FILES+=usr/share/examples/cvs/contrib/rcs-to-cvs
OLD_FILES+=usr/share/examples/cvs/contrib/rcs2log
OLD_FILES+=usr/share/examples/cvs/contrib/rcslock
OLD_FILES+=usr/share/examples/cvs/contrib/sccs2rcs
OLD_DIRS+=usr/share/examples/cvs/contrib
OLD_DIRS+=usr/share/examples/cvs
OLD_FILES+=usr/share/info/cvs.info.gz
OLD_FILES+=usr/share/info/cvsclient.info.gz
OLD_FILES+=usr/share/man/man1/cvs.1.gz
OLD_FILES+=usr/share/man/man5/cvs.5.gz
OLD_FILES+=usr/share/man/man8/cvsbug.8.gz
# 20130607: WITH_DEBUG_FILES added
OLD_FILES+=lib/libufs.so.6.symbols
OLD_FILES+=usr/lib32/libufs.so.6.symbols
# 20130417: nfs fha moved from nfsserver to nfs
OLD_FILES+=usr/include/nfsserver/nfs_fha.h
# 20130411: new clang import which bumps version from 3.2 to 3.3.
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.2/altivec.h
OLD_FILES+=usr/include/clang/3.2/ammintrin.h
OLD_FILES+=usr/include/clang/3.2/avx2intrin.h
OLD_FILES+=usr/include/clang/3.2/avxintrin.h
OLD_FILES+=usr/include/clang/3.2/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.2/bmiintrin.h
OLD_FILES+=usr/include/clang/3.2/cpuid.h
OLD_FILES+=usr/include/clang/3.2/emmintrin.h
OLD_FILES+=usr/include/clang/3.2/f16cintrin.h
OLD_FILES+=usr/include/clang/3.2/fma4intrin.h
OLD_FILES+=usr/include/clang/3.2/fmaintrin.h
OLD_FILES+=usr/include/clang/3.2/immintrin.h
OLD_FILES+=usr/include/clang/3.2/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.2/mm3dnow.h
OLD_FILES+=usr/include/clang/3.2/mm_malloc.h
OLD_FILES+=usr/include/clang/3.2/mmintrin.h
OLD_FILES+=usr/include/clang/3.2/module.map
OLD_FILES+=usr/include/clang/3.2/nmmintrin.h
OLD_FILES+=usr/include/clang/3.2/pmmintrin.h
OLD_FILES+=usr/include/clang/3.2/popcntintrin.h
OLD_FILES+=usr/include/clang/3.2/rtmintrin.h
OLD_FILES+=usr/include/clang/3.2/smmintrin.h
OLD_FILES+=usr/include/clang/3.2/tmmintrin.h
OLD_FILES+=usr/include/clang/3.2/wmmintrin.h
OLD_FILES+=usr/include/clang/3.2/x86intrin.h
OLD_FILES+=usr/include/clang/3.2/xmmintrin.h
OLD_FILES+=usr/include/clang/3.2/xopintrin.h
OLD_DIRS+=usr/include/clang/3.2
# 20130404: legacy ATA stack removed
OLD_FILES+=etc/periodic/daily/405.status-ata-raid
OLD_FILES+=rescue/atacontrol
OLD_FILES+=sbin/atacontrol
OLD_FILES+=usr/share/man/man8/atacontrol.8.gz
OLD_FILES+=usr/share/man/man4/atapicam.4.gz
OLD_FILES+=usr/share/man/man4/ataraid.4.gz
OLD_FILES+=usr/sbin/burncd
OLD_FILES+=usr/share/man/man8/burncd.8.gz
# 20130316: vinum.4 removed
OLD_FILES+=usr/share/man/man4/vinum.4.gz
# 20130312: fortunes-o removed
OLD_FILES+=usr/share/games/fortune/fortunes-o
OLD_FILES+=usr/share/games/fortune/fortunes-o.dat
# 20130311: Ports are no longer available via cvsup
OLD_FILES+=usr/share/examples/cvsup/ports-supfile
OLD_FILES+=usr/share/examples/cvsup/refuse
OLD_FILES+=usr/share/examples/cvsup/refuse.README
# 20130309: NWFS and NCP support removed
OLD_FILES+=usr/bin/ncplist
OLD_FILES+=usr/bin/ncplogin
OLD_FILES+=usr/bin/ncplogout
OLD_FILES+=usr/include/fs/nwfs/nwfs.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_node.h
OLD_FILES+=usr/include/fs/nwfs/nwfs_subr.h
OLD_DIRS+=usr/include/fs/nwfs
OLD_FILES+=usr/include/netncp/ncp.h
OLD_FILES+=usr/include/netncp/ncp_cfg.h
OLD_FILES+=usr/include/netncp/ncp_conn.h
OLD_FILES+=usr/include/netncp/ncp_file.h
OLD_FILES+=usr/include/netncp/ncp_lib.h
OLD_FILES+=usr/include/netncp/ncp_ncp.h
OLD_FILES+=usr/include/netncp/ncp_nls.h
OLD_FILES+=usr/include/netncp/ncp_rcfile.h
OLD_FILES+=usr/include/netncp/ncp_rq.h
OLD_FILES+=usr/include/netncp/ncp_sock.h
OLD_FILES+=usr/include/netncp/ncp_subr.h
OLD_FILES+=usr/include/netncp/ncp_user.h
OLD_FILES+=usr/include/netncp/ncpio.h
OLD_FILES+=usr/include/netncp/nwerror.h
OLD_DIRS+=usr/include/netncp
OLD_FILES+=usr/lib/libncp.a
OLD_FILES+=usr/lib/libncp.so
OLD_LIBS+=usr/lib/libncp.so.4
OLD_FILES+=usr/lib/libncp_p.a
OLD_FILES+=usr/lib32/libncp.a
OLD_FILES+=usr/lib32/libncp.so
OLD_LIBS+=usr/lib32/libncp.so.4
OLD_FILES+=usr/lib32/libncp_p.a
OLD_FILES+=usr/sbin/mount_nwfs
OLD_FILES+=usr/share/examples/nwclient/dot.nwfsrc
OLD_FILES+=usr/share/examples/nwclient/nwfs.sh.sample
OLD_DIRS+=usr/share/examples/nwclient
OLD_FILES+=usr/share/man/man1/ncplist.1.gz
OLD_FILES+=usr/share/man/man1/ncplogin.1.gz
OLD_FILES+=usr/share/man/man1/ncplogout.1.gz
OLD_FILES+=usr/share/man/man8/mount_nwfs.8.gz
# 20130302: NTFS support removed
OLD_FILES+=rescue/mount_ntfs
OLD_FILES+=sbin/mount_ntfs
OLD_FILES+=usr/include/fs/ntfs/ntfs.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/fs/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/fs/ntfs/ntfsmount.h
OLD_DIRS+=usr/include/fs/ntfs
OLD_FILES+=usr/share/man/man8/mount_ntfs.8.gz
# 20130302: PORTALFS support removed
OLD_FILES+=usr/include/fs/portalfs/portal.h
OLD_DIRS+=usr/include/fs/portalfs
OLD_FILES+=usr/sbin/mount_portalfs
OLD_FILES+=usr/share/examples/portal/README
OLD_FILES+=usr/share/examples/portal/portal.conf
OLD_DIRS+=usr/share/examples/portal
OLD_FILES+=usr/share/man/man8/mount_portalfs.8.gz
# 20130302: CODAFS support removed
OLD_FILES+=usr/share/man/man4/coda.4.gz
# 20130302: XFS support removed
OLD_FILES+=usr/share/man/man5/xfs.5.gz
# 20130302: Capsicum overhaul
OLD_FILES+=usr/share/man/man2/cap_getrights.2.gz
OLD_FILES+=usr/share/man/man2/cap_new.2.gz
# 20130213: OpenSSL 1.0.1e import
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover.3.gz
OLD_FILES+=usr/share/openssl/man/man3/EVP_PKEY_verifyrecover_init.3.gz
# 20130116: removed long-unused directories for .1aout section manpages
OLD_FILES+=usr/share/man/en.ISO8859-1/man1aout
OLD_FILES+=usr/share/man/en.UTF-8/man1aout
OLD_DIRS+=usr/share/man/man1aout
OLD_DIRS+=usr/share/man/cat1aout
OLD_DIRS+=usr/share/man/en.ISO8859-1/cat1aout
OLD_DIRS+=usr/share/man/en.UTF-8/cat1aout
# 20130110: bsd.compat.mk removed
OLD_FILES+=usr/share/mk/bsd.compat.mk
# 20130103: gnats-supfile removed
OLD_FILES+=usr/share/examples/cvsup/gnats-supfile
# 20121230: libdisk removed
OLD_FILES+=usr/share/man/man3/libdisk.3.gz usr/include/libdisk.h
OLD_FILES+=usr/lib/libdisk.a usr/lib32/libdisk.a
# 20121230: remove wrongly created directories for auditdistd
OLD_DIRS+=var/dist
OLD_DIRS+=var/remote
# 20121114: zpool-features manual page moved from section 5 to 7
OLD_FILES+=usr/share/man/man5/zpool-features.5.gz
# 20121022: remove harp, hfa, idt and if_idt man pages
OLD_FILES+=usr/share/man/man4/harp.4.gz
OLD_FILES+=usr/share/man/man4/hfa.4.gz
OLD_FILES+=usr/share/man/man4/idt.4.gz
OLD_FILES+=usr/share/man/man4/if_idt.4.gz
# 20121022: VFS_LOCK_GIANT elimination
OLD_FILES+=usr/share/man/man9/VFS_LOCK_GIANT.9.gz
OLD_FILES+=usr/share/man/man9/VFS_UNLOCK_GIANT.9.gz
# 20121004: remove incomplete unwind.h
OLD_FILES+=usr/include/clang/3.2/unwind.h
# 20120910: NetBSD compat shims removed
OLD_FILES+=usr/include/cam/scsi/scsi_low_pisa.h
OLD_FILES+=usr/include/sys/device_port.h
# 20120909: doc and www supfiles removed
OLD_FILES+=usr/share/examples/cvsup/doc-supfile
OLD_FILES+=usr/share/examples/cvsup/www-supfile
# 20120908: pf cleanup
OLD_FILES+=usr/include/net/if_pflow.h
# 20120816: new clang import which bumps version from 3.1 to 3.2
OLD_FILES+=usr/bin/llvm-ld
OLD_FILES+=usr/bin/llvm-stub
OLD_FILES+=usr/include/clang/3.1/altivec.h
OLD_FILES+=usr/include/clang/3.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.1/cpuid.h
OLD_FILES+=usr/include/clang/3.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.1/immintrin.h
OLD_FILES+=usr/include/clang/3.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.1/module.map
OLD_FILES+=usr/include/clang/3.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.1/unwind.h
OLD_FILES+=usr/include/clang/3.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.1/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.1
OLD_FILES+=usr/share/man/man1/llvm-ld.1.gz
# 20120712: OpenSSL 1.0.1c import
OLD_LIBS+=lib/libcrypto.so.6
OLD_LIBS+=usr/lib/libssl.so.6
OLD_LIBS+=usr/lib32/libcrypto.so.6
OLD_LIBS+=usr/lib32/libssl.so.6
OLD_FILES+=usr/include/openssl/aes_locl.h
OLD_FILES+=usr/include/openssl/bio_lcl.h
OLD_FILES+=usr/include/openssl/e_os.h
OLD_FILES+=usr/include/openssl/fips.h
OLD_FILES+=usr/include/openssl/fips_rand.h
OLD_FILES+=usr/include/openssl/pq_compat.h
OLD_FILES+=usr/include/openssl/tmdiff.h
OLD_FILES+=usr/include/openssl/ui_locl.h
OLD_FILES+=usr/share/openssl/man/man3/CRYPTO_set_id_callback.3.gz
# 20120621: remove old vnconfig(8) man page
OLD_FILES+=usr/share/man/man8/vnconfig.8.gz
# 20120619: TOE support updated
OLD_FILES+=usr/include/netinet/toedev.h
# 20120613: auth.conf removed
OLD_FILES+=etc/auth.conf
OLD_FILES+=usr/share/examples/etc/auth.conf
OLD_FILES+=usr/share/man/man3/auth.3.gz
OLD_FILES+=usr/share/man/man3/auth_getval.3.gz
OLD_FILES+=usr/share/man/man5/auth.conf.5.gz
# 20120530: the kde pam.d file now lives in ports
OLD_FILES+=etc/pam.d/kde
# 20120521: byacc import
OLD_FILES+=usr/bin/yyfix
OLD_FILES+=usr/share/man/man1/yyfix.1.gz
# 20120505: new clang import installed a redundant internal header
OLD_FILES+=usr/include/clang/3.1/stdalign.h
# 20120428: MD2 removed from libmd
OLD_LIBS+=lib/libmd.so.5
OLD_FILES+=usr/include/md2.h
OLD_LIBS+=usr/lib32/libmd.so.5
OLD_FILES+=usr/share/man/man3/MD2Data.3.gz
OLD_FILES+=usr/share/man/man3/MD2End.3.gz
OLD_FILES+=usr/share/man/man3/MD2File.3.gz
OLD_FILES+=usr/share/man/man3/MD2FileChunk.3.gz
OLD_FILES+=usr/share/man/man3/MD2Final.3.gz
OLD_FILES+=usr/share/man/man3/MD2Init.3.gz
OLD_FILES+=usr/share/man/man3/MD2Update.3.gz
OLD_FILES+=usr/share/man/man3/md2.3.gz
# 20120425: libusb version bump (r234684)
OLD_LIBS+=usr/lib/libusb.so.2
OLD_LIBS+=usr/lib32/libusb.so.2
OLD_FILES+=usr/share/man/man3/libsub_get_active_config_descriptor.3.gz
# 20120415: new clang import which bumps version from 3.0 to 3.1
OLD_FILES+=usr/include/clang/3.0/altivec.h
OLD_FILES+=usr/include/clang/3.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.0/immintrin.h
OLD_FILES+=usr/include/clang/3.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.0
# 20120412: BIND 9.8.1 release notes removed
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.pdf
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.txt
OLD_FILES+=usr/share/doc/bind9/RELEASE-NOTES-BIND-9.8.1.html
OLD_FILES+=usr/share/doc/bind9/release-notes.css
# 20120330: legacy(4) moved to x86
OLD_FILES+=usr/include/machine/legacyvar.h
# 20120324: MPI headers updated
OLD_FILES+=usr/include/dev/mpt/mpilib/mpi_inb.h
# 20120322: hwpmc_mips24k.h removed
OLD_FILES+=usr/include/dev/hwpmc/hwpmc_mips24k.h
# 20120322: Update heimdal to 1.5.1.
OLD_FILES+=usr/include/krb5-v4compat.h \
usr/include/krb_err.h \
usr/include/hdb-private.h \
usr/share/man/man3/krb5_addresses.3.gz \
usr/share/man/man3/krb5_cc_cursor.3.gz \
usr/share/man/man3/krb5_cc_ops.3.gz \
usr/share/man/man3/krb5_config.3.gz \
usr/share/man/man3/krb5_config_get_int_default.3.gz \
usr/share/man/man3/krb5_context.3.gz \
usr/share/man/man3/krb5_data.3.gz \
usr/share/man/man3/krb5_err.3.gz \
usr/share/man/man3/krb5_errx.3.gz \
usr/share/man/man3/krb5_keyblock.3.gz \
usr/share/man/man3/krb5_keytab_entry.3.gz \
usr/share/man/man3/krb5_kt_cursor.3.gz \
usr/share/man/man3/krb5_kt_ops.3.gz \
usr/share/man/man3/krb5_set_warn_dest.3.gz \
usr/share/man/man3/krb5_verr.3.gz \
usr/share/man/man3/krb5_verrx.3.gz \
usr/share/man/man3/krb5_vwarnx.3.gz \
usr/share/man/man3/krb5_warn.3.gz \
usr/share/man/man3/krb5_warnx.3.gz
OLD_LIBS+=usr/lib/libasn1.so.10 \
usr/lib/libhdb.so.10 \
usr/lib/libheimntlm.so.10 \
usr/lib/libhx509.so.10 \
usr/lib/libkadm5clnt.so.10 \
usr/lib/libkadm5srv.so.10 \
usr/lib/libkafs5.so.10 \
usr/lib/libkrb5.so.10 \
usr/lib/libroken.so.10 \
usr/lib32/libasn1.so.10 \
usr/lib32/libhdb.so.10 \
usr/lib32/libheimntlm.so.10 \
usr/lib32/libhx509.so.10 \
usr/lib32/libkadm5clnt.so.10 \
usr/lib32/libkadm5srv.so.10 \
usr/lib32/libkafs5.so.10 \
usr/lib32/libkrb5.so.10 \
usr/lib32/libroken.so.10
# 20120309: Remove fifofs header files.
OLD_FILES+=usr/include/fs/fifofs/fifo.h
OLD_DIRS+=usr/include/fs/fifofs
# 20120304: xlocale cleanup
OLD_FILES+=usr/include/_xlocale_ctype.h
# 20120225: libarchive 3.0.3
OLD_FILES+=usr/share/man/man3/archive_read_data_into_buffer.3.gz \
usr/share/man/man3/archive_read_support_compression_all.3.gz \
usr/share/man/man3/archive_read_support_compression_bzip2.3.gz \
usr/share/man/man3/archive_read_support_compression_compress.3.gz \
usr/share/man/man3/archive_read_support_compression_gzip.3.gz \
usr/share/man/man3/archive_read_support_compression_lzma.3.gz \
usr/share/man/man3/archive_read_support_compression_none.3.gz \
usr/share/man/man3/archive_read_support_compression_program.3.gz \
usr/share/man/man3/archive_read_support_compression_program_signature.3.gz \
usr/share/man/man3/archive_read_support_compression_xz.3.gz \
usr/share/man/man3/archive_write_set_callbacks.3.gz \
usr/share/man/man3/archive_write_set_compression_bzip2.3.gz \
usr/share/man/man3/archive_write_set_compression_compress.3.gz \
usr/share/man/man3/archive_write_set_compression_gzip.3.gz \
usr/share/man/man3/archive_write_set_compression_none.3.gz \
usr/share/man/man3/archive_write_set_compression_program.3.gz
OLD_LIBS+=usr/lib/libarchive.so.5
OLD_LIBS+=usr/lib32/libarchive.so.5
# 20120113: removal of wtmpcvt(1)
OLD_FILES+=usr/bin/wtmpcvt
OLD_FILES+=usr/share/man/man1/wtmpcvt.1.gz
# 20111214: eventtimers(7) moved to eventtimers(4)
OLD_FILES+=usr/share/man/man7/eventtimers.7.gz
# 20111125: amd(4) removed
OLD_FILES+=usr/share/man/man4/amd.4.gz
# 20111125: libodialog removed
OLD_FILES+=usr/lib/libodialog.a
OLD_FILES+=usr/lib/libodialog.so
OLD_LIBS+=usr/lib/libodialog.so.7
OLD_FILES+=usr/lib/libodialog_p.a
OLD_FILES+=usr/lib32/libodialog.a
OLD_FILES+=usr/lib32/libodialog.so
OLD_LIBS+=usr/lib32/libodialog.so.7
OLD_FILES+=usr/lib32/libodialog_p.a
# 20110930: sysinstall removed
OLD_FILES+=usr/sbin/sysinstall
OLD_FILES+=usr/share/man/man8/sysinstall.8.gz
OLD_FILES+=usr/lib/libftpio.a
OLD_FILES+=usr/lib/libftpio.so
OLD_LIBS+=usr/lib/libftpio.so.8
OLD_FILES+=usr/lib/libftpio_p.a
OLD_FILES+=usr/lib32/libftpio.a
OLD_FILES+=usr/lib32/libftpio.so
OLD_LIBS+=usr/lib32/libftpio.so.8
OLD_FILES+=usr/lib32/libftpio_p.a
OLD_FILES+=usr/include/ftpio.h
OLD_FILES+=usr/share/man/man3/ftpio.3.gz
# 20110915: rename congestion control manpages
OLD_FILES+=usr/share/man/man9/cc.9.gz
# 20110831: atomic page flags operations
OLD_FILES+=usr/share/man/man9/vm_page_flag.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_flag_clear.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_flag_set.9.gz
# 20110828: library version bump for 9.0
OLD_LIBS+=lib/libcam.so.5
OLD_LIBS+=lib/libpcap.so.7
OLD_LIBS+=lib/libufs.so.5
OLD_LIBS+=usr/lib/libbsnmp.so.5
OLD_LIBS+=usr/lib/libdwarf.so.2
OLD_LIBS+=usr/lib/libopie.so.6
OLD_LIBS+=usr/lib/librtld_db.so.1
OLD_LIBS+=usr/lib/libtacplus.so.4
OLD_LIBS+=usr/lib32/libcam.so.5
OLD_LIBS+=usr/lib32/libpcap.so.7
OLD_LIBS+=usr/lib32/libufs.so.5
OLD_LIBS+=usr/lib32/libbsnmp.so.5
OLD_LIBS+=usr/lib32/libdwarf.so.2
OLD_LIBS+=usr/lib32/libopie.so.6
OLD_LIBS+=usr/lib32/librtld_db.so.1
OLD_LIBS+=usr/lib32/libtacplus.so.4
# 20110817: no more acd.4, ad.4, afd.4 and ast.4
OLD_FILES+=usr/share/man/man4/acd.4.gz
OLD_FILES+=usr/share/man/man4/ad.4.gz
OLD_FILES+=usr/share/man/man4/afd.4.gz
OLD_FILES+=usr/share/man/man4/ast.4.gz
# 20110718: no longer useful in the age of rc.d
OLD_FILES+=usr/sbin/named.reconfig
OLD_FILES+=usr/sbin/named.reload
OLD_FILES+=usr/share/man/man8/named.reconfig.8.gz
OLD_FILES+=usr/share/man/man8/named.reload.8.gz
# 20110716: bind 9.8.0 import
OLD_LIBS+=usr/lib/liblwres.so.50
OLD_FILES+=usr/share/doc/bind9/KNOWN-DEFECTS
OLD_FILES+=usr/share/doc/bind9/NSEC3-NOTES
OLD_FILES+=usr/share/doc/bind9/README.idnkit
OLD_FILES+=usr/share/doc/bind9/README.pkcs11
# 20110709: vm_map_clean.9 -> vm_map_sync.9
OLD_FILES+=usr/share/man/man9/vm_map_clean.9.gz
# 20110709: Catch up with removal of these functions.
OLD_FILES+=usr/share/man/man9/vm_page_copy.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_protect.9.gz
OLD_FILES+=usr/share/man/man9/vm_page_zero_fill.9.gz
# 20110707: script no longer needed by /etc/rc.d/nfsd
OLD_FILES+=etc/rc.d/nfsserver
# 20110705: files moved so both NFS clients can share them
OLD_FILES+=usr/include/nfsclient/krpc.h
OLD_FILES+=usr/include/nfsclient/nfsdiskless.h
# 20110705: default NFS client switched to the new implementation
OLD_FILES+=sbin/mount_newnfs
OLD_FILES+=usr/share/man/man8/mount_newnfs.8.gz
OLD_FILES+=usr/include/nfsclient/nfs_kdtrace.h
# 20110628: calendar.msk removed
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.msk
# 20110517: libpkg removed
OLD_FILES+=usr/include/pkg.h
OLD_FILES+=usr/lib/libpkg.a
OLD_FILES+=usr/lib/libpkg.so
OLD_LIBS+=usr/lib/libpkg.so.0
OLD_FILES+=usr/lib/libpkg_p.a
OLD_FILES+=usr/lib32/libpkg.a
OLD_FILES+=usr/lib32/libpkg.so
OLD_LIBS+=usr/lib32/libpkg.so.0
OLD_FILES+=usr/lib32/libpkg_p.a
# 20110517: libsbuf version bump
OLD_LIBS+=lib/libsbuf.so.5
OLD_LIBS+=usr/lib32/libsbuf.so.5
# 20110502: new clang import which bumps version from 2.9 to 3.0
OLD_FILES+=usr/include/clang/2.9/emmintrin.h
OLD_FILES+=usr/include/clang/2.9/mm_malloc.h
OLD_FILES+=usr/include/clang/2.9/mmintrin.h
OLD_FILES+=usr/include/clang/2.9/pmmintrin.h
OLD_FILES+=usr/include/clang/2.9/tmmintrin.h
OLD_FILES+=usr/include/clang/2.9/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.9
# 20110417: removal of Objective-C support
OLD_FILES+=usr/include/objc/encoding.h
OLD_FILES+=usr/include/objc/hash.h
OLD_FILES+=usr/include/objc/NXConstStr.h
OLD_FILES+=usr/include/objc/objc-api.h
OLD_FILES+=usr/include/objc/objc-decls.h
OLD_FILES+=usr/include/objc/objc-list.h
OLD_FILES+=usr/include/objc/objc.h
OLD_FILES+=usr/include/objc/Object.h
OLD_FILES+=usr/include/objc/Protocol.h
OLD_FILES+=usr/include/objc/runtime.h
OLD_FILES+=usr/include/objc/sarray.h
OLD_FILES+=usr/include/objc/thr.h
OLD_FILES+=usr/include/objc/typedstream.h
OLD_FILES+=usr/lib/libobjc.a
OLD_FILES+=usr/lib/libobjc.so
OLD_FILES+=usr/lib/libobjc_p.a
OLD_FILES+=usr/libexec/cc1obj
OLD_LIBS+=usr/lib/libobjc.so.4
OLD_DIRS+=usr/include/objc
OLD_FILES+=usr/lib32/libobjc.a
OLD_FILES+=usr/lib32/libobjc.so
OLD_FILES+=usr/lib32/libobjc_p.a
OLD_LIBS+=usr/lib32/libobjc.so.4
# 20110331: firmware.img created at build time
OLD_FILES+=usr/share/examples/kld/firmware/fwimage/firmware.img
# 20110224: sticky.8 -> sticky.7
OLD_FILES+=usr/share/man/man8/sticky.8.gz
# 20110220: new clang import which bumps version from 2.8 to 2.9
OLD_FILES+=usr/include/clang/2.8/emmintrin.h
OLD_FILES+=usr/include/clang/2.8/mm_malloc.h
OLD_FILES+=usr/include/clang/2.8/mmintrin.h
OLD_FILES+=usr/include/clang/2.8/pmmintrin.h
OLD_FILES+=usr/include/clang/2.8/tmmintrin.h
OLD_FILES+=usr/include/clang/2.8/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.8
# 20110119: netinet/sctp_cc_functions.h removed
OLD_FILES+=usr/include/netinet/sctp_cc_functions.h
# 20110119: Remove SYSCTL_*X* sysctl additions.
OLD_FILES+=usr/share/man/man9/SYSCTL_XINT.9.gz \
usr/share/man/man9/SYSCTL_XLONG.9.gz
# 20110112: Update dialog to a new version; rename the old libdialog to
# libodialog and remove the associated man pages and header files.
OLD_FILES+=usr/share/man/man3/draw_shadow.3.gz \
usr/share/man/man3/draw_box.3.gz usr/share/man/man3/line_edit.3.gz \
usr/share/man/man3/strheight.3.gz usr/share/man/man3/strwidth.3.gz \
usr/share/man/man3/dialog_create_rc.3.gz \
usr/share/man/man3/dialog_yesno.3.gz usr/share/man/man3/dialog_noyes.3.gz \
usr/share/man/man3/dialog_prgbox.3.gz \
usr/share/man/man3/dialog_textbox.3.gz usr/share/man/man3/dialog_menu.3.gz \
usr/share/man/man3/dialog_checklist.3.gz \
usr/share/man/man3/dialog_radiolist.3.gz \
usr/share/man/man3/dialog_inputbox.3.gz \
usr/share/man/man3/dialog_clear_norefresh.3.gz \
usr/share/man/man3/dialog_clear.3.gz usr/share/man/man3/dialog_update.3.gz \
usr/share/man/man3/dialog_fselect.3.gz \
usr/share/man/man3/dialog_notify.3.gz \
usr/share/man/man3/dialog_mesgbox.3.gz \
usr/share/man/man3/dialog_gauge.3.gz usr/share/man/man3/init_dialog.3.gz \
usr/share/man/man3/end_dialog.3.gz usr/share/man/man3/use_helpfile.3.gz \
usr/share/man/man3/use_helpline.3.gz usr/share/man/man3/get_helpline.3.gz \
usr/share/man/man3/restore_helpline.3.gz \
usr/share/man/man3/dialog_msgbox.3.gz \
usr/share/man/man3/dialog_ftree.3.gz usr/share/man/man3/dialog_tree.3.gz \
usr/share/examples/dialog/README usr/share/examples/dialog/checklist \
usr/share/examples/dialog/ftreebox usr/share/examples/dialog/infobox \
usr/share/examples/dialog/inputbox usr/share/examples/dialog/menubox \
usr/share/examples/dialog/msgbox usr/share/examples/dialog/prgbox \
usr/share/examples/dialog/radiolist usr/share/examples/dialog/textbox \
usr/share/examples/dialog/treebox usr/share/examples/dialog/yesno \
usr/share/examples/libdialog/Makefile usr/share/examples/libdialog/check1.c\
usr/share/examples/libdialog/check2.c usr/share/examples/libdialog/check3.c\
usr/share/examples/libdialog/dselect.c \
usr/share/examples/libdialog/fselect.c \
usr/share/examples/libdialog/ftree1.c \
usr/share/examples/libdialog/ftree1.test \
usr/share/examples/libdialog/ftree2.c \
usr/share/examples/libdialog/ftree2.test \
usr/share/examples/libdialog/gauge.c usr/share/examples/libdialog/input1.c \
usr/share/examples/libdialog/input2.c usr/share/examples/libdialog/menu1.c \
usr/share/examples/libdialog/menu2.c usr/share/examples/libdialog/menu3.c \
usr/share/examples/libdialog/msg.c usr/share/examples/libdialog/prgbox.c \
usr/share/examples/libdialog/radio1.c usr/share/examples/libdialog/radio2.c\
usr/share/examples/libdialog/radio3.c usr/share/examples/libdialog/text.c \
usr/share/examples/libdialog/tree.c usr/share/examples/libdialog/yesno.c
OLD_DIRS+=usr/share/examples/libdialog usr/share/examples/dialog
# 20101114: Remove long-obsolete MAKEDEV.8
OLD_FILES+=usr/share/man/man8/MAKEDEV.8.gz
# 20101112: vgonel(9) has gone to private API a while ago
OLD_FILES+=usr/share/man/man9/vgonel.9.gz
# 20101112: removed gasp.info
OLD_FILES+=usr/share/info/gasp.info.gz
# 20101109: machine/mutex.h removed
OLD_FILES+=usr/include/machine/mutex.h
# 20101109: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/mptable.h
.endif
# 20101101: headers moved from machine/ to x86/
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/apicreg.h
OLD_FILES+=usr/include/machine/mca.h
.endif
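# Note: arch-specific entries, such as the two blocks above, are wrapped in
# make conditionals so that an entry only applies when building for the
# matching TARGET_ARCH. A minimal sketch of the pattern (the header path
# below is a hypothetical placeholder, not a real entry):
# .if ${TARGET_ARCH} == "amd64"
# OLD_FILES+=usr/include/machine/example.h
# .endif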
# 20101020: catch up with vm_page_sleep_if_busy rename
OLD_FILES+=usr/share/man/man9/vm_page_sleep_busy.9.gz
# 20101018: taskqueue(9) updates
OLD_FILES+=usr/share/man/man9/taskqueue_find.9.gz
# 20101011: removed subblock.h from liblzma
OLD_FILES+=usr/include/lzma/subblock.h
# 20101002: removed manpath.config
OLD_FILES+=etc/manpath.config
OLD_FILES+=usr/share/examples/etc/manpath.config
# 20100910: renamed sbuf_overflowed to sbuf_error
OLD_FILES+=usr/share/man/man9/sbuf_overflowed.9.gz
# 20100815: retired last traces of chooseproc(9)
OLD_FILES+=usr/share/man/man9/chooseproc.9.gz
# 20100806: removal of unused libcompat routines
OLD_FILES+=usr/share/man/man3/ascftime.3.gz
OLD_FILES+=usr/share/man/man3/cfree.3.gz
OLD_FILES+=usr/share/man/man3/cftime.3.gz
OLD_FILES+=usr/share/man/man3/getpw.3.gz
# 20100801: tzdata2010k import
OLD_FILES+=usr/share/zoneinfo/Pacific/Ponape
OLD_FILES+=usr/share/zoneinfo/Pacific/Truk
# 20100725: acpi_aiboost(4) removal.
OLD_FILES+=usr/share/man/man4/acpi_aiboost.4.gz
# 20100724: nfsclient/nfs_lock.h moved to nfs/nfs_lock.h
OLD_FILES+=usr/include/nfsclient/nfs_lock.h
# 20100720: new clang import which bumps version from 2.0 to 2.8
OLD_FILES+=usr/include/clang/2.0/emmintrin.h
OLD_FILES+=usr/include/clang/2.0/mm_malloc.h
OLD_FILES+=usr/include/clang/2.0/mmintrin.h
OLD_FILES+=usr/include/clang/2.0/pmmintrin.h
OLD_FILES+=usr/include/clang/2.0/tmmintrin.h
OLD_FILES+=usr/include/clang/2.0/xmmintrin.h
OLD_DIRS+=usr/include/clang/2.0
# 20100706: removed pc-sysinstall's detect-vmware.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-vmware.sh
# 20100701: [powerpc] removed <machine/intr.h>
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/machine/intr.h
.endif
# 20100514: liblzma version bump for versioned symbols
OLD_LIBS+=usr/lib/liblzma.so.0
OLD_LIBS+=usr/lib32/liblzma.so.0
# 20100511: move GCC-specific headers to /usr/include/gcc
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/emmintrin.h
OLD_FILES+=usr/include/mm_malloc.h
OLD_FILES+=usr/include/pmmintrin.h
OLD_FILES+=usr/include/xmmintrin.h
.endif
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386" || ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/mmintrin.h
.endif
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/altivec.h
OLD_FILES+=usr/include/ppc-asm.h
OLD_FILES+=usr/include/spe.h
.endif
# 20100416: [mips] removed <machine/psl.h>
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/psl.h
.endif
# 20100415: [mips] removed unused headers
.if ${TARGET_ARCH} == "mips"
OLD_FILES+=usr/include/machine/archtype.h
OLD_FILES+=usr/include/machine/segments.h
OLD_FILES+=usr/include/machine/rm7000.h
OLD_FILES+=usr/include/machine/defs.h
OLD_FILES+=usr/include/machine/queue.h
.endif
# 20100326: gcpio removal
OLD_FILES+=usr/bin/gcpio
OLD_FILES+=usr/share/info/cpio.info.gz
OLD_FILES+=usr/share/man/man1/gcpio.1.gz
# 20100322: libz update
OLD_LIBS+=lib/libz.so.5
OLD_LIBS+=usr/lib32/libz.so.5
# 20100314: removal of regexp.h
OLD_FILES+=usr/include/regexp.h
OLD_FILES+=usr/share/man/man3/regexp.3.gz
OLD_FILES+=usr/share/man/man3/regsub.3.gz
# 20100303: actual removal of utmp.h
OLD_FILES+=usr/include/utmp.h
# 20100208: man pages moved
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/alpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/amdpm.4.gz
OLD_FILES+=usr/share/man/man4/i386/mcd.4.gz
OLD_FILES+=usr/share/man/man4/i386/padlock.4.gz
OLD_FILES+=usr/share/man/man4/i386/pcf.4.gz
OLD_FILES+=usr/share/man/man4/i386/scd.4.gz
OLD_FILES+=usr/share/man/man4/i386/viapm.4.gz
.endif
# 20100122: move BSDL bc/dc USD documents to /usr/share/doc/usd
OLD_FILES+=usr/share/doc/papers/bc.ascii.gz
OLD_FILES+=usr/share/doc/papers/dc.ascii.gz
# 20100120: replacing GNU bc/dc with BSDL versions
OLD_FILES+=usr/share/examples/bc/ckbook.b
OLD_FILES+=usr/share/examples/bc/pi.b
OLD_FILES+=usr/share/examples/bc/primes.b
OLD_FILES+=usr/share/examples/bc/twins.b
OLD_FILES+=usr/share/info/dc.info.gz
OLD_DIRS+=usr/share/examples/bc
# 20100114: removal of ttyslot(3)
OLD_FILES+=usr/share/man/man3/ttyslot.3.gz
# 20100113: remove utmp.h, replaced by utmpx.h
OLD_FILES+=usr/share/man/man3/login.3.gz
OLD_FILES+=usr/share/man/man3/logout.3.gz
OLD_FILES+=usr/share/man/man3/logwtmp.3.gz
OLD_FILES+=usr/share/man/man3/ulog_endutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxline.3.gz
OLD_FILES+=usr/share/man/man3/ulog_getutxuser.3.gz
OLD_FILES+=usr/share/man/man3/ulog_pututxline.3.gz
OLD_FILES+=usr/share/man/man3/ulog_setutxent.3.gz
OLD_FILES+=usr/share/man/man3/ulog_setutxfile.3.gz
OLD_FILES+=usr/share/man/man5/lastlog.5.gz
OLD_FILES+=usr/share/man/man5/utmp.5.gz
OLD_FILES+=usr/share/man/man5/wtmp.5.gz
OLD_LIBS+=lib/libutil.so.8
OLD_LIBS+=usr/lib32/libutil.so.8
# 20100105: new userland semaphore implementation
OLD_FILES+=usr/include/sys/semaphore.h
# 20100103: ntptrace(8) removed
OLD_FILES+=usr/sbin/ntptrace
OLD_FILES+=usr/share/man/man8/ntptrace.8.gz
# 20091229: remove no longer relevant examples
OLD_FILES+=usr/share/examples/pppd/auth-down.sample
OLD_FILES+=usr/share/examples/pppd/auth-up.sample
OLD_FILES+=usr/share/examples/pppd/chap-secrets.sample
OLD_FILES+=usr/share/examples/pppd/chat.sh.sample
OLD_FILES+=usr/share/examples/pppd/ip-down.sample
OLD_FILES+=usr/share/examples/pppd/ip-up.sample
OLD_FILES+=usr/share/examples/pppd/options.sample
OLD_FILES+=usr/share/examples/pppd/pap-secrets.sample
OLD_FILES+=usr/share/examples/pppd/ppp.deny.sample
OLD_FILES+=usr/share/examples/pppd/ppp.shells.sample
OLD_DIRS+=usr/share/examples/pppd
OLD_FILES+=usr/share/examples/slattach/unit-command.sh
OLD_DIRS+=usr/share/examples/slattach
OLD_FILES+=usr/share/examples/sliplogin/slip.hosts
OLD_FILES+=usr/share/examples/sliplogin/slip.login
OLD_FILES+=usr/share/examples/sliplogin/slip.logout
OLD_FILES+=usr/share/examples/sliplogin/slip.slparms
OLD_DIRS+=usr/share/examples/sliplogin
OLD_FILES+=usr/share/examples/startslip/sldown.sh
OLD_FILES+=usr/share/examples/startslip/slip.sh
OLD_FILES+=usr/share/examples/startslip/slup.sh
OLD_DIRS+=usr/share/examples/startslip
# 20091202: unify rc.firewall and rc.firewall6.
OLD_FILES+=etc/rc.d/ip6fw
OLD_FILES+=etc/rc.firewall6
OLD_FILES+=usr/share/examples/etc/rc.firewall6
# 20091117: removal of rc.early(8) link
OLD_FILES+=usr/share/man/man8/rc.early.8.gz
# 20091117: usr/share/zoneinfo/GMT link removed
OLD_FILES+=usr/share/zoneinfo/GMT
# 20091027: pselect.3 implemented as a syscall
OLD_FILES+=usr/share/man/man3/pselect.3.gz
# 20091005: fusword.9 and susword.9 removed
OLD_FILES+=usr/share/man/man9/fusword.9.gz
OLD_FILES+=usr/share/man/man9/susword.9.gz
# 20090909: vesa and dpms promoted to be i386/amd64 common
OLD_FILES+=usr/include/machine/pc/vesa.h
OLD_FILES+=usr/share/man/man4/i386/dpms.4.gz
# 20090904: remove lukemftpd
OLD_FILES+=usr/libexec/lukemftpd
OLD_FILES+=usr/share/man/man5/ftpd.conf.5.gz
OLD_FILES+=usr/share/man/man5/ftpusers.5.gz
OLD_FILES+=usr/share/man/man8/lukemftpd.8.gz
# 20090902: BSD.{x11,x11-4}.dist removed and BSD.local.dist now lives in ports/
OLD_FILES+=etc/mtree/BSD.local.dist
OLD_FILES+=etc/mtree/BSD.x11.dist
OLD_FILES+=etc/mtree/BSD.x11-4.dist
# 20090812: net80211 documentation overhaul
OLD_FILES+=usr/share/man/man9/ieee80211_add_rates.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_add_xrates.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_alloc_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_begin_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_cfgget.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_cfgset.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_chan2ieee.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_chan2mode.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_create_ibss.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_crypto_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_crypto_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_decap.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_dump_pkt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_dup_bss.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_encap.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_end_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_find_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_fix_rate.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_free_allnodes.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_ieee2mhz.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_ioctl.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_lookup_node.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media2rate.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_change.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_init.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_media_status.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_mhz2ieee.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_next_scan.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_node_lateattach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_print_essid.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_proto_attach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_proto_detach.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_rate2media.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_recv_mgmt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_send_mgmt.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_setmode.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_timeout_nodes.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_watchdog.9.gz
OLD_FILES+=usr/share/man/man9/ieee80211_wep_crypt.9.gz
# 20090801: vimage.h removed in favour of vnet.h
OLD_FILES+=usr/include/sys/vimage.h
# 20101208: libbsnmp was moved to usr/lib
OLD_LIBS+=lib/libbsnmp.so.5
# 20090719: library version bump for 8.0
OLD_LIBS+=lib/libalias.so.6
OLD_LIBS+=lib/libavl.so.1
OLD_LIBS+=lib/libbegemot.so.3
OLD_LIBS+=lib/libbsdxml.so.3
OLD_LIBS+=lib/libbsnmp.so.4
OLD_LIBS+=lib/libcam.so.4
OLD_LIBS+=lib/libcrypt.so.4
OLD_LIBS+=lib/libcrypto.so.5
OLD_LIBS+=lib/libctf.so.1
OLD_LIBS+=lib/libdevstat.so.6
OLD_LIBS+=lib/libdtrace.so.1
OLD_LIBS+=lib/libedit.so.6
OLD_LIBS+=lib/libgeom.so.4
OLD_LIBS+=lib/libipsec.so.3
OLD_LIBS+=lib/libipx.so.4
OLD_LIBS+=lib/libkiconv.so.3
OLD_LIBS+=lib/libkvm.so.4
OLD_LIBS+=lib/libmd.so.4
OLD_LIBS+=lib/libncurses.so.7
OLD_LIBS+=lib/libncursesw.so.7
OLD_LIBS+=lib/libnvpair.so.1
OLD_LIBS+=lib/libpcap.so.6
OLD_LIBS+=lib/libreadline.so.7
OLD_LIBS+=lib/libsbuf.so.4
OLD_LIBS+=lib/libufs.so.4
OLD_LIBS+=lib/libumem.so.1
OLD_LIBS+=lib/libutil.so.7
OLD_LIBS+=lib/libuutil.so.1
OLD_LIBS+=lib/libz.so.4
OLD_LIBS+=lib/libzfs.so.1
OLD_LIBS+=lib/libzpool.so.1
OLD_LIBS+=usr/lib/libarchive.so.4
OLD_LIBS+=usr/lib/libauditd.so.4
OLD_LIBS+=usr/lib/libbluetooth.so.3
OLD_LIBS+=usr/lib/libbsm.so.2
OLD_LIBS+=usr/lib/libbz2.so.3
OLD_LIBS+=usr/lib/libcalendar.so.4
OLD_LIBS+=usr/lib/libcom_err.so.4
OLD_LIBS+=usr/lib/libdevinfo.so.4
OLD_LIBS+=usr/lib/libdialog.so.6
OLD_LIBS+=usr/lib/libdwarf.so.1
OLD_LIBS+=usr/lib/libfetch.so.5
OLD_LIBS+=usr/lib/libform.so.4
OLD_LIBS+=usr/lib/libformw.so.4
OLD_LIBS+=usr/lib/libftpio.so.7
OLD_LIBS+=usr/lib/libgnuregex.so.4
OLD_LIBS+=usr/lib/libgpib.so.2
OLD_LIBS+=usr/lib/libhistory.so.7
OLD_LIBS+=usr/lib/libmagic.so.3
OLD_LIBS+=usr/lib/libmemstat.so.2
OLD_LIBS+=usr/lib/libmenu.so.4
OLD_LIBS+=usr/lib/libmenuw.so.4
OLD_LIBS+=usr/lib/libmilter.so.4
OLD_LIBS+=usr/lib/libncp.so.3
OLD_LIBS+=usr/lib/libnetgraph.so.3
OLD_LIBS+=usr/lib/libngatm.so.3
OLD_LIBS+=usr/lib/libobjc.so.3
OLD_LIBS+=usr/lib/libopie.so.5
OLD_LIBS+=usr/lib/libpam.so.4
OLD_LIBS+=usr/lib/libpanel.so.4
OLD_LIBS+=usr/lib/libpanelw.so.4
OLD_LIBS+=usr/lib/libpmc.so.4
OLD_LIBS+=usr/lib/libproc.so.1
OLD_LIBS+=usr/lib/libradius.so.3
OLD_LIBS+=usr/lib/librpcsvc.so.4
OLD_LIBS+=usr/lib/libsdp.so.3
OLD_LIBS+=usr/lib/libsmb.so.3
OLD_LIBS+=usr/lib/libssh.so.4
OLD_LIBS+=usr/lib/libssl.so.5
OLD_LIBS+=usr/lib/libtacplus.so.3
OLD_LIBS+=usr/lib/libugidfw.so.3
OLD_LIBS+=usr/lib/libusb.so.1
OLD_LIBS+=usr/lib/libusbhid.so.3
OLD_LIBS+=usr/lib/libvgl.so.5
OLD_LIBS+=usr/lib/libwrap.so.5
OLD_LIBS+=usr/lib/libypclnt.so.3
OLD_LIBS+=usr/lib/pam_chroot.so.4
OLD_LIBS+=usr/lib/pam_deny.so.4
OLD_LIBS+=usr/lib/pam_echo.so.4
OLD_LIBS+=usr/lib/pam_exec.so.4
OLD_LIBS+=usr/lib/pam_ftpusers.so.4
OLD_LIBS+=usr/lib/pam_group.so.4
OLD_LIBS+=usr/lib/pam_guest.so.4
OLD_LIBS+=usr/lib/pam_krb5.so.4
OLD_LIBS+=usr/lib/pam_ksu.so.4
OLD_LIBS+=usr/lib/pam_lastlog.so.4
OLD_LIBS+=usr/lib/pam_login_access.so.4
OLD_LIBS+=usr/lib/pam_nologin.so.4
OLD_LIBS+=usr/lib/pam_opie.so.4
OLD_LIBS+=usr/lib/pam_opieaccess.so.4
OLD_LIBS+=usr/lib/pam_passwdqc.so.4
OLD_LIBS+=usr/lib/pam_permit.so.4
OLD_LIBS+=usr/lib/pam_radius.so.4
OLD_LIBS+=usr/lib/pam_rhosts.so.4
OLD_LIBS+=usr/lib/pam_rootok.so.4
OLD_LIBS+=usr/lib/pam_securetty.so.4
OLD_LIBS+=usr/lib/pam_self.so.4
OLD_LIBS+=usr/lib/pam_ssh.so.4
OLD_LIBS+=usr/lib/pam_tacplus.so.4
OLD_LIBS+=usr/lib/pam_unix.so.4
OLD_LIBS+=usr/lib/snmp_atm.so.5
OLD_LIBS+=usr/lib/snmp_bridge.so.5
OLD_LIBS+=usr/lib/snmp_hostres.so.5
OLD_LIBS+=usr/lib/snmp_mibII.so.5
OLD_LIBS+=usr/lib/snmp_netgraph.so.5
OLD_LIBS+=usr/lib/snmp_pf.so.5
OLD_LIBS+=usr/lib32/libalias.so.6
OLD_LIBS+=usr/lib32/libarchive.so.4
OLD_LIBS+=usr/lib32/libauditd.so.4
OLD_LIBS+=usr/lib32/libavl.so.1
OLD_LIBS+=usr/lib32/libbegemot.so.3
OLD_LIBS+=usr/lib32/libbluetooth.so.3
OLD_LIBS+=usr/lib32/libbsdxml.so.3
OLD_LIBS+=usr/lib32/libbsm.so.2
OLD_LIBS+=usr/lib32/libbsnmp.so.4
OLD_LIBS+=usr/lib32/libbz2.so.3
OLD_LIBS+=usr/lib32/libcalendar.so.4
OLD_LIBS+=usr/lib32/libcam.so.4
OLD_LIBS+=usr/lib32/libcom_err.so.4
OLD_LIBS+=usr/lib32/libcrypt.so.4
OLD_LIBS+=usr/lib32/libcrypto.so.5
OLD_LIBS+=usr/lib32/libctf.so.1
OLD_LIBS+=usr/lib32/libdevinfo.so.4
OLD_LIBS+=usr/lib32/libdevstat.so.6
OLD_LIBS+=usr/lib32/libdialog.so.6
OLD_LIBS+=usr/lib32/libdtrace.so.1
OLD_LIBS+=usr/lib32/libdwarf.so.1
OLD_LIBS+=usr/lib32/libedit.so.6
OLD_LIBS+=usr/lib32/libfetch.so.5
OLD_LIBS+=usr/lib32/libform.so.4
OLD_LIBS+=usr/lib32/libformw.so.4
OLD_LIBS+=usr/lib32/libftpio.so.7
OLD_LIBS+=usr/lib32/libgeom.so.4
OLD_LIBS+=usr/lib32/libgnuregex.so.4
OLD_LIBS+=usr/lib32/libgpib.so.2
OLD_LIBS+=usr/lib32/libhistory.so.7
OLD_LIBS+=usr/lib32/libipsec.so.3
OLD_LIBS+=usr/lib32/libipx.so.4
OLD_LIBS+=usr/lib32/libkiconv.so.3
OLD_LIBS+=usr/lib32/libkvm.so.4
OLD_LIBS+=usr/lib32/libmagic.so.3
OLD_LIBS+=usr/lib32/libmd.so.4
OLD_LIBS+=usr/lib32/libmemstat.so.2
OLD_LIBS+=usr/lib32/libmenu.so.4
OLD_LIBS+=usr/lib32/libmenuw.so.4
OLD_LIBS+=usr/lib32/libmilter.so.4
OLD_LIBS+=usr/lib32/libncp.so.3
OLD_LIBS+=usr/lib32/libncurses.so.7
OLD_LIBS+=usr/lib32/libncursesw.so.7
OLD_LIBS+=usr/lib32/libnetgraph.so.3
OLD_LIBS+=usr/lib32/libngatm.so.3
OLD_LIBS+=usr/lib32/libnvpair.so.1
OLD_LIBS+=usr/lib32/libobjc.so.3
OLD_LIBS+=usr/lib32/libopie.so.5
OLD_LIBS+=usr/lib32/libpam.so.4
OLD_LIBS+=usr/lib32/libpanel.so.4
OLD_LIBS+=usr/lib32/libpanelw.so.4
OLD_LIBS+=usr/lib32/libpcap.so.6
OLD_LIBS+=usr/lib32/libpmc.so.4
OLD_LIBS+=usr/lib32/libproc.so.1
OLD_LIBS+=usr/lib32/libradius.so.3
OLD_LIBS+=usr/lib32/libreadline.so.7
OLD_LIBS+=usr/lib32/librpcsvc.so.4
OLD_LIBS+=usr/lib32/libsbuf.so.4
OLD_LIBS+=usr/lib32/libsdp.so.3
OLD_LIBS+=usr/lib32/libsmb.so.3
OLD_LIBS+=usr/lib32/libssh.so.4
OLD_LIBS+=usr/lib32/libssl.so.5
OLD_LIBS+=usr/lib32/libtacplus.so.3
OLD_LIBS+=usr/lib32/libufs.so.4
OLD_LIBS+=usr/lib32/libugidfw.so.3
OLD_LIBS+=usr/lib32/libumem.so.1
OLD_LIBS+=usr/lib32/libusb.so.1
OLD_LIBS+=usr/lib32/libusbhid.so.3
OLD_LIBS+=usr/lib32/libutil.so.7
OLD_LIBS+=usr/lib32/libuutil.so.1
OLD_LIBS+=usr/lib32/libvgl.so.5
OLD_LIBS+=usr/lib32/libwrap.so.5
OLD_LIBS+=usr/lib32/libypclnt.so.3
OLD_LIBS+=usr/lib32/libz.so.4
OLD_LIBS+=usr/lib32/libzfs.so.1
OLD_LIBS+=usr/lib32/libzpool.so.1
OLD_LIBS+=usr/lib32/pam_chroot.so.4
OLD_LIBS+=usr/lib32/pam_deny.so.4
OLD_LIBS+=usr/lib32/pam_echo.so.4
OLD_LIBS+=usr/lib32/pam_exec.so.4
OLD_LIBS+=usr/lib32/pam_ftpusers.so.4
OLD_LIBS+=usr/lib32/pam_group.so.4
OLD_LIBS+=usr/lib32/pam_guest.so.4
OLD_LIBS+=usr/lib32/pam_krb5.so.4
OLD_LIBS+=usr/lib32/pam_ksu.so.4
OLD_LIBS+=usr/lib32/pam_lastlog.so.4
OLD_LIBS+=usr/lib32/pam_login_access.so.4
OLD_LIBS+=usr/lib32/pam_nologin.so.4
OLD_LIBS+=usr/lib32/pam_opie.so.4
OLD_LIBS+=usr/lib32/pam_opieaccess.so.4
OLD_LIBS+=usr/lib32/pam_passwdqc.so.4
OLD_LIBS+=usr/lib32/pam_permit.so.4
OLD_LIBS+=usr/lib32/pam_radius.so.4
OLD_LIBS+=usr/lib32/pam_rhosts.so.4
OLD_LIBS+=usr/lib32/pam_rootok.so.4
OLD_LIBS+=usr/lib32/pam_securetty.so.4
OLD_LIBS+=usr/lib32/pam_self.so.4
OLD_LIBS+=usr/lib32/pam_ssh.so.4
OLD_LIBS+=usr/lib32/pam_tacplus.so.4
OLD_LIBS+=usr/lib32/pam_unix.so.4
# 20090718: the gdm pam.d file is no longer required.
OLD_FILES+=etc/pam.d/gdm
# 20090714: net_add_domain(9) renamed to domain_add(9)
OLD_FILES+=usr/share/man/man9/net_add_domain.9.gz
# 20090713: vimage container structs removed.
OLD_FILES+=usr/include/netinet/vinet.h
OLD_FILES+=usr/include/netinet6/vinet6.h
OLD_FILES+=usr/include/netipsec/vipsec.h
# 20090712: ieee80211.4 -> net80211.4
OLD_FILES+=usr/share/man/man4/ieee80211.4.gz
# 20090711: typo fixed, kproc_resume,.9 -> kproc_resume.9
OLD_FILES+=usr/share/man/man9/kproc_resume,.9.gz
# 20090709: msgctl.3 msgget.3 msgrcv.3 msgsnd.3 manual pages moved
OLD_FILES+=usr/share/man/man3/msgctl.3.gz
OLD_FILES+=usr/share/man/man3/msgget.3.gz
OLD_FILES+=usr/share/man/man3/msgrcv.3.gz
OLD_FILES+=usr/share/man/man3/msgsnd.3.gz
# 20090630: old kernel RPC implementation removal
OLD_FILES+=usr/include/nfs/rpcv2.h
# 20090624: update usbdi(9)
OLD_FILES+=usr/share/man/man9/usbd_abort_default_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_abort_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_alloc_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_alloc_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_stall_async.9.gz
OLD_FILES+=usr/share/man/man9/usbd_clear_endpoint_toggle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_close_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_device2interface_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_async.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_flags_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_endpoint_count.9.gz
OLD_FILES+=usr/share/man/man9/usbd_find_edesc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_find_idesc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_free_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_free_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_desc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_desc_full.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_config_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_device_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_endpoint_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_interface_altindex.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_interface_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_no_alts.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_quirks.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_speed.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_string.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_string_desc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_get_xfer_status.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface2device_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface2endpoint_descriptor.9.gz
OLD_FILES+=usr/share/man/man9/usbd_interface_count.9.gz
OLD_FILES+=usr/share/man/man9/usbd_open_pipe.9.gz
OLD_FILES+=usr/share/man/man9/usbd_open_pipe_intr.9.gz
OLD_FILES+=usr/share/man/man9/usbd_pipe2device_handle.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_config_index.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_config_no.9.gz
OLD_FILES+=usr/share/man/man9/usbd_set_interface.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_default_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_isoc_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_setup_xfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_sync_transfer.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer.9.gz
OLD_FILES+=usr/share/man/man9/usb_find_desc.9.gz
# 20090623: number of headers needed for a USB driver reduced
OLD_FILES+=usr/include/dev/usb/usb_defs.h
OLD_FILES+=usr/include/dev/usb/usb_error.h
OLD_FILES+=usr/include/dev/usb/usb_handle_request.h
OLD_FILES+=usr/include/dev/usb/usb_hid.h
OLD_FILES+=usr/include/dev/usb/usb_lookup.h
OLD_FILES+=usr/include/dev/usb/usb_mfunc.h
OLD_FILES+=usr/include/dev/usb/usb_parse.h
OLD_FILES+=usr/include/dev/usb/usb_revision.h
# 20090609: devclass_add_driver is no longer public
OLD_FILES+=usr/share/man/man9/devclass_add_driver.9.gz
OLD_FILES+=usr/share/man/man9/devclass_delete_driver.9.gz
OLD_FILES+=usr/share/man/man9/devclass_find_driver.9.gz
# 20090605: removal of clists
OLD_FILES+=usr/include/sys/clist.h
# 20090602: removal of window(1)
OLD_FILES+=usr/bin/window
OLD_FILES+=usr/share/man/man1/window.1.gz
# 20090531: bind 9.6.1rc1 import
OLD_LIBS+=usr/lib/liblwres.so.30
# 20090530: removal of early.sh
OLD_FILES+=etc/rc.d/early.sh
# 20090527: renaming of S{LIST,TAILQ}_REMOVE_NEXT() to _REMOVE_AFTER()
OLD_FILES+=usr/share/man/man3/SLIST_REMOVE_NEXT.3.gz
OLD_FILES+=usr/share/man/man3/STAILQ_REMOVE_NEXT.3.gz
# 20090527: removal of legacy USB stack
OLD_FILES+=usr/include/legacy/dev/usb/dsbr100io.h
OLD_FILES+=usr/include/legacy/dev/usb/ehcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ehcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/hid.h
OLD_FILES+=usr/include/legacy/dev/usb/if_urtwreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_urtwvar.h
OLD_FILES+=usr/include/legacy/dev/usb/ohcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ohcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/rio500_usb.h
OLD_FILES+=usr/include/legacy/dev/usb/rt2573_ucode.h
OLD_FILES+=usr/include/legacy/dev/usb/sl811hsreg.h
OLD_FILES+=usr/include/legacy/dev/usb/sl811hsvar.h
OLD_FILES+=usr/include/legacy/dev/usb/ubser.h
OLD_FILES+=usr/include/legacy/dev/usb/ucomvar.h
OLD_FILES+=usr/include/legacy/dev/usb/udbp.h
OLD_FILES+=usr/include/legacy/dev/usb/uftdireg.h
OLD_FILES+=usr/include/legacy/dev/usb/ugraphire_rdesc.h
OLD_FILES+=usr/include/legacy/dev/usb/uhcireg.h
OLD_FILES+=usr/include/legacy/dev/usb/uhcivar.h
OLD_FILES+=usr/include/legacy/dev/usb/usb.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_mem.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_port.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_quirks.h
OLD_FILES+=usr/include/legacy/dev/usb/usbcdc.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdi.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdi_util.h
OLD_FILES+=usr/include/legacy/dev/usb/usbdivar.h
OLD_FILES+=usr/include/legacy/dev/usb/usbhid.h
OLD_FILES+=usr/include/legacy/dev/usb/uxb360gp_rdesc.h
OLD_DIRS+=usr/include/legacy/dev/usb
OLD_DIRS+=usr/include/legacy/dev
OLD_DIRS+=usr/include/legacy
# 20090526: removal of makekey(8)
OLD_FILES+=usr/libexec/makekey
OLD_FILES+=usr/share/man/man8/makekey.8.gz
# 20090522: removal of University of Michigan NFSv4 client
OLD_FILES+=etc/rc.d/idmapd
OLD_FILES+=sbin/idmapd
OLD_FILES+=sbin/mount_nfs4
OLD_FILES+=usr/share/man/man8/idmapd.8.gz
OLD_FILES+=usr/share/man/man8/mount_nfs4.8.gz
# 20090513: removal of legacy versions of USB network interface drivers
OLD_FILES+=usr/include/legacy/dev/usb/if_upgtvar.h
OLD_FILES+=usr/include/legacy/dev/usb/usb_ethersubr.h
# 20090417: removal of legacy versions of USB network interface drivers
OLD_FILES+=usr/include/legacy/dev/usb/if_auereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_axereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_cdcereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_cuereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_kuereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_ruereg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_rumreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_rumvar.h
OLD_FILES+=usr/include/legacy/dev/usb/if_udavreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_uralreg.h
OLD_FILES+=usr/include/legacy/dev/usb/if_uralvar.h
OLD_FILES+=usr/include/legacy/dev/usb/if_zydfw.h
OLD_FILES+=usr/include/legacy/dev/usb/if_zydreg.h
OLD_FILES+=usr/include/legacy/dev/usb/kue_fw.h
# 20090416: removal of ar(4), ray(4), sr(4), raycontrol(8)
OLD_FILES+=usr/sbin/raycontrol
OLD_FILES+=usr/share/man/man4/i386/ar.4.gz
OLD_FILES+=usr/share/man/man4/i386/ray.4.gz
OLD_FILES+=usr/share/man/man4/i386/sr.4.gz
OLD_FILES+=usr/share/man/man8/raycontrol.8.gz
# 20090410: VOP_LEASE.9 removed
OLD_FILES+=usr/share/man/man9/VOP_LEASE.9.gz
# 20090406: usb_sw_transfer.h removed
OLD_FILES+=usr/include/dev/usb/usb_sw_transfer.h
# 20090405: removal of if_ppp(4) and if_sl(4)
OLD_FILES+=sbin/slattach rescue/slattach
OLD_FILES+=sbin/startslip rescue/startslip
OLD_FILES+=usr/include/net/if_ppp.h
OLD_FILES+=usr/include/net/if_pppvar.h
OLD_FILES+=usr/include/net/if_slvar.h
OLD_FILES+=usr/include/net/ppp_comp.h
OLD_FILES+=usr/include/net/slip.h
OLD_FILES+=usr/sbin/sliplogin
OLD_FILES+=usr/sbin/slstat
OLD_FILES+=usr/sbin/pppd
OLD_FILES+=usr/sbin/pppstats
OLD_FILES+=usr/share/man/man1/startslip.1.gz
OLD_FILES+=usr/share/man/man4/if_ppp.4.gz
OLD_FILES+=usr/share/man/man4/if_sl.4.gz
OLD_FILES+=usr/share/man/man4/ppp.4.gz
OLD_FILES+=usr/share/man/man4/sl.4.gz
OLD_FILES+=usr/share/man/man8/pppd.8.gz
OLD_FILES+=usr/share/man/man8/pppstats.8.gz
OLD_FILES+=usr/share/man/man8/slattach.8.gz
OLD_FILES+=usr/share/man/man8/slip.8.gz
OLD_FILES+=usr/share/man/man8/sliplogin.8.gz
OLD_FILES+=usr/share/man/man8/slstat.8.gz
# 20090321: libpcap upgraded to 1.0.0
OLD_LIBS+=lib/libpcap.so.5
OLD_LIBS+=usr/lib32/libpcap.so.5
# 20090319: uscanner(4) has been removed
OLD_FILES+=usr/share/man/man4/uscanner.4.gz
# 20090313: k8temp(4) renamed to amdtemp(4)
OLD_FILES+=usr/share/man/man4/k8temp.4.gz
# 20090308: libusb20.so.1 renamed to libusb.so.1
OLD_LIBS+=usr/lib/libusb20.so.1
OLD_FILES+=usr/lib/libusb20.a
OLD_FILES+=usr/lib/libusb20.so
OLD_FILES+=usr/lib/libusb20_p.a
OLD_FILES+=usr/include/libusb20_compat01.h
OLD_FILES+=usr/include/libusb20_compat10.h
OLD_LIBS+=usr/lib32/libusb20.so.1
OLD_FILES+=usr/lib32/libusb20.a
OLD_FILES+=usr/lib32/libusb20.so
OLD_FILES+=usr/lib32/libusb20_p.a
# 20090226: libmp(3) functions renamed
OLD_LIBS+=usr/lib/libmp.so.6
OLD_LIBS+=usr/lib32/libmp.so.6
# 20090223: changeover of USB stacks
OLD_FILES+=usr/include/dev/usb2/include/ufm2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/urio2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_cdc.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_defs.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_devid.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_devtable.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_endian.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_error.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_hid.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_ioctl.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_mfunc.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_revision.h
OLD_FILES+=usr/include/dev/usb2/include/usb2_standard.h
OLD_DIRS+=usr/include/dev/usb2/include
OLD_DIRS+=usr/include/dev/usb2
OLD_FILES+=usr/include/dev/usb/dsbr100io.h
OLD_FILES+=usr/include/dev/usb/ehcireg.h
OLD_FILES+=usr/include/dev/usb/ehcivar.h
OLD_FILES+=usr/include/dev/usb/hid.h
OLD_FILES+=usr/include/dev/usb/if_auereg.h
OLD_FILES+=usr/include/dev/usb/if_axereg.h
OLD_FILES+=usr/include/dev/usb/if_cdcereg.h
OLD_FILES+=usr/include/dev/usb/if_cuereg.h
OLD_FILES+=usr/include/dev/usb/if_kuereg.h
OLD_FILES+=usr/include/dev/usb/if_ruereg.h
OLD_FILES+=usr/include/dev/usb/if_rumreg.h
OLD_FILES+=usr/include/dev/usb/if_rumvar.h
OLD_FILES+=usr/include/dev/usb/if_udavreg.h
OLD_FILES+=usr/include/dev/usb/if_upgtvar.h
OLD_FILES+=usr/include/dev/usb/if_uralreg.h
OLD_FILES+=usr/include/dev/usb/if_uralvar.h
OLD_FILES+=usr/include/dev/usb/if_urtwreg.h
OLD_FILES+=usr/include/dev/usb/if_urtwvar.h
OLD_FILES+=usr/include/dev/usb/if_zydfw.h
OLD_FILES+=usr/include/dev/usb/if_zydreg.h
OLD_FILES+=usr/include/dev/usb/kue_fw.h
OLD_FILES+=usr/include/dev/usb/ohcireg.h
OLD_FILES+=usr/include/dev/usb/ohcivar.h
OLD_FILES+=usr/include/dev/usb/rio500_usb.h
OLD_FILES+=usr/include/dev/usb/rt2573_ucode.h
OLD_FILES+=usr/include/dev/usb/sl811hsreg.h
OLD_FILES+=usr/include/dev/usb/sl811hsvar.h
OLD_FILES+=usr/include/dev/usb/ubser.h
OLD_FILES+=usr/include/dev/usb/ucomvar.h
OLD_FILES+=usr/include/dev/usb/udbp.h
OLD_FILES+=usr/include/dev/usb/uftdireg.h
OLD_FILES+=usr/include/dev/usb/ugraphire_rdesc.h
OLD_FILES+=usr/include/dev/usb/uhcireg.h
OLD_FILES+=usr/include/dev/usb/uhcivar.h
OLD_FILES+=usr/include/dev/usb/usb_ethersubr.h
OLD_FILES+=usr/include/dev/usb/usb_mem.h
OLD_FILES+=usr/include/dev/usb/usb_port.h
OLD_FILES+=usr/include/dev/usb/usb_quirks.h
OLD_FILES+=usr/include/dev/usb/usbcdc.h
OLD_FILES+=usr/include/dev/usb/usbdivar.h
OLD_FILES+=usr/include/dev/usb/uxb360gp_rdesc.h
OLD_FILES+=usr/sbin/usbdevs
OLD_FILES+=usr/share/man/man8/usbdevs.8.gz
# 20090203: removal of pccard header files
OLD_FILES+=usr/include/pccard/cardinfo.h
OLD_FILES+=usr/include/pccard/cis.h
OLD_DIRS+=usr/include/pccard
# 20090203: adding_user.8 moved to adding_user.7
OLD_FILES+=usr/share/man/man8/adding_user.8.gz
# 20090122: tzdata2009a import
OLD_FILES+=usr/share/zoneinfo/Asia/Katmandu
# 20090102: file 4.26 import
OLD_FILES+=usr/share/misc/magic.mime
OLD_FILES+=usr/share/misc/magic.mime.mgc
# 20081223: bind 9.4.3 import, nsupdate.8 moved to nsupdate.1
OLD_FILES+=usr/share/man/man8/nsupdate.8.gz
# 20081223: ipprotosw.h removed
OLD_FILES+=usr/include/netinet/ipprotosw.h
# 20081123: vfs_mountedon.9 removed
OLD_FILES+=usr/share/man/man9/vfs_mountedon.9.gz
# 20081023: FREE.9 and MALLOC.9 removed
OLD_FILES+=usr/share/man/man9/FREE.9.gz
OLD_FILES+=usr/share/man/man9/MALLOC.9.gz
# 20080928: removal of inaccurate device_ids(9) manual page
OLD_FILES+=usr/share/man/man9/device_ids.9.gz
OLD_FILES+=usr/share/man/man9/major.9.gz
OLD_FILES+=usr/share/man/man9/minor.9.gz
OLD_FILES+=usr/share/man/man9/umajor.9.gz
OLD_FILES+=usr/share/man/man9/uminor.9.gz
# 20080917: removal of manpage for axed kernel primitive suser(9)
OLD_FILES+=usr/share/man/man9/suser.9.gz
OLD_FILES+=usr/share/man/man9/suser_cred.9.gz
# 20080913: pax removed from rescue
OLD_FILES+=rescue/pax
# 20080823: removal of pt_chown, which is not needed to implement grantpt(3)
OLD_FILES+=usr/libexec/pt_chown
# 20080822: ntp 4.2.4p5 import
OLD_FILES+=usr/share/doc/ntp/driver23.html
OLD_FILES+=usr/share/doc/ntp/driver24.html
# 20080821: several man pages moved from man4.i386 to man4
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/acpi_aiboost.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_asus.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_fujitsu.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_ibm.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_panasonic.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_sony.4.gz
OLD_FILES+=usr/share/man/man4/i386/acpi_toshiba.4.gz
OLD_FILES+=usr/share/man/man4/i386/ichwd.4.gz
OLD_FILES+=usr/share/man/man4/i386/if_ndis.4.gz
OLD_FILES+=usr/share/man/man4/i386/io.4.gz
OLD_FILES+=usr/share/man/man4/i386/linux.4.gz
OLD_FILES+=usr/share/man/man4/i386/ndis.4.gz
.endif
# 20080820: MPSAFE TTY layer integrated
OLD_FILES+=usr/include/sys/linedisc.h
OLD_FILES+=usr/share/man/man3/posix_openpt.3.gz
# 20080725: sgtty.h removed
OLD_FILES+=usr/include/sgtty.h
# 20080706: bsdlabel(8) removed on powerpc
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=sbin/bsdlabel
OLD_FILES+=usr/share/man/man8/bsdlabel.8.gz
.endif
# 20080704: sbsh(4) removed
OLD_FILES+=usr/share/man/man4/if_sbsh.4.gz
OLD_FILES+=usr/share/man/man4/sbsh.4.gz
# 20080704: cnw(4) removed
OLD_FILES+=usr/share/man/man4/if_cnw.4.gz
OLD_FILES+=usr/share/man/man4/cnw.4.gz
# 20080704: oltr(4) removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/share/man/man4/i386/if_oltr.4.gz
OLD_FILES+=usr/share/man/man4/i386/oltr.4.gz
.endif
# 20080704: arl(4) removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/sbin/arlcontrol
OLD_FILES+=usr/share/man/man4/i386/arl.4.gz
OLD_FILES+=usr/share/man/man8/arlcontrol.8.gz
.endif
# 20080703: sunlabel only for sparc64
.if ${TARGET_ARCH} != "sparc64"
OLD_FILES+=sbin/sunlabel
OLD_FILES+=usr/share/man/man8/sunlabel.8.gz
.endif
# 20080701: wpa_supplicant.conf moved to share/examples/etc/
OLD_FILES+=usr/share/examples/wpa_supplicant/wpa_supplicant.conf
OLD_DIRS+=usr/share/examples/wpa_supplicant
# 20080614: pecoff image activator removed
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/pecoff_machdep.h
.endif
# 20080614: sgtty removed
OLD_FILES+=usr/include/sys/ttychars.h
OLD_FILES+=usr/include/sys/ttydev.h
OLD_FILES+=usr/share/man/man3/gtty.3.gz
OLD_FILES+=usr/share/man/man3/stty.3.gz
# 20080609: gpt(8) removed
OLD_FILES+=sbin/gpt
OLD_FILES+=usr/share/man/man8/gpt.8.gz
# 20080525: I4B removed
OLD_FILES+=etc/isdn/answer
OLD_FILES+=etc/isdn/isdntel
OLD_FILES+=etc/isdn/record
OLD_FILES+=etc/isdn/tell
OLD_FILES+=etc/isdn/tell-record
OLD_FILES+=etc/isdn/unknown_incoming
OLD_FILES+=etc/isdn/holidays.D
OLD_FILES+=etc/isdn/isdnd.rates.A
OLD_FILES+=etc/isdn/isdnd.rates.D
OLD_FILES+=etc/isdn/isdnd.rates.F
OLD_FILES+=etc/isdn/isdnd.rates.L
OLD_FILES+=etc/isdn/isdnd.rates.UK.BT
OLD_FILES+=etc/isdn/isdnd.rc.sample
OLD_FILES+=etc/isdn/isdntel.alias.sample
OLD_DIRS+=etc/isdn
OLD_FILES+=etc/rc.d/isdnd
OLD_FILES+=usr/include/i4b/i4b_cause.h
OLD_FILES+=usr/include/i4b/i4b_debug.h
OLD_FILES+=usr/include/i4b/i4b_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_rbch_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_tel_ioctl.h
OLD_FILES+=usr/include/i4b/i4b_trace.h
OLD_DIRS+=usr/include/i4b
OLD_FILES+=usr/sbin/dtmfdecode
OLD_FILES+=usr/sbin/g711conv
OLD_FILES+=usr/sbin/isdnd
OLD_FILES+=usr/sbin/isdndebug
OLD_FILES+=usr/sbin/isdndecode
OLD_FILES+=usr/sbin/isdnmonitor
OLD_FILES+=usr/sbin/isdnphone
OLD_FILES+=usr/sbin/isdntel
OLD_FILES+=usr/sbin/isdntelctl
OLD_FILES+=usr/sbin/isdntrace
OLD_FILES+=usr/share/isdn/0.al
OLD_FILES+=usr/share/isdn/1.al
OLD_FILES+=usr/share/isdn/2.al
OLD_FILES+=usr/share/isdn/3.al
OLD_FILES+=usr/share/isdn/4.al
OLD_FILES+=usr/share/isdn/5.al
OLD_FILES+=usr/share/isdn/6.al
OLD_FILES+=usr/share/isdn/7.al
OLD_FILES+=usr/share/isdn/8.al
OLD_FILES+=usr/share/isdn/9.al
OLD_FILES+=usr/share/isdn/beep.al
OLD_FILES+=usr/share/isdn/msg.al
OLD_DIRS+=usr/share/isdn
OLD_FILES+=usr/share/man/man1/dtmfdecode.1.gz
OLD_FILES+=usr/share/man/man1/g711conv.1.gz
OLD_FILES+=usr/share/man/man4/i4b.4.gz
OLD_FILES+=usr/share/man/man4/i4bcapi.4.gz
OLD_FILES+=usr/share/man/man4/i4bctl.4.gz
OLD_FILES+=usr/share/man/man4/i4bing.4.gz
OLD_FILES+=usr/share/man/man4/i4bipr.4.gz
OLD_FILES+=usr/share/man/man4/i4bisppp.4.gz
OLD_FILES+=usr/share/man/man4/i4bq921.4.gz
OLD_FILES+=usr/share/man/man4/i4bq931.4.gz
OLD_FILES+=usr/share/man/man4/i4brbch.4.gz
OLD_FILES+=usr/share/man/man4/i4btel.4.gz
OLD_FILES+=usr/share/man/man4/i4btrc.4.gz
OLD_FILES+=usr/share/man/man4/iavc.4.gz
OLD_FILES+=usr/share/man/man4/isic.4.gz
OLD_FILES+=usr/share/man/man4/ifpi.4.gz
OLD_FILES+=usr/share/man/man4/ifpi2.4.gz
OLD_FILES+=usr/share/man/man4/ifpnp.4.gz
OLD_FILES+=usr/share/man/man4/ihfc.4.gz
OLD_FILES+=usr/share/man/man4/itjc.4.gz
OLD_FILES+=usr/share/man/man4/iwic.4.gz
OLD_FILES+=usr/share/man/man5/isdnd.rc.5.gz
OLD_FILES+=usr/share/man/man5/isdnd.rates.5.gz
OLD_FILES+=usr/share/man/man5/isdnd.acct.5.gz
OLD_FILES+=usr/share/man/man8/isdnd.8.gz
OLD_FILES+=usr/share/man/man8/isdndebug.8.gz
OLD_FILES+=usr/share/man/man8/isdndecode.8.gz
OLD_FILES+=usr/share/man/man8/isdnmonitor.8.gz
OLD_FILES+=usr/share/man/man8/isdnphone.8.gz
OLD_FILES+=usr/share/man/man8/isdntel.8.gz
OLD_FILES+=usr/share/man/man8/isdntelctl.8.gz
OLD_FILES+=usr/share/man/man8/isdntrace.8.gz
OLD_FILES+=usr/share/examples/isdn/contrib/README
OLD_FILES+=usr/share/examples/isdn/contrib/anleitung.ppp
OLD_FILES+=usr/share/examples/isdn/contrib/answer.c
OLD_FILES+=usr/share/examples/isdn/contrib/answer.sh
OLD_FILES+=usr/share/examples/isdn/contrib/convert.sh
OLD_FILES+=usr/share/examples/isdn/contrib/hplay.c
OLD_FILES+=usr/share/examples/isdn/contrib/i4b-ppp-newbie.txt
OLD_FILES+=usr/share/examples/isdn/contrib/isdnctl
OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct
OLD_FILES+=usr/share/examples/isdn/contrib/isdnd_acct.pl
OLD_FILES+=usr/share/examples/isdn/contrib/isdntelmux.c
OLD_FILES+=usr/share/examples/isdn/contrib/mrtg-isp0.sh
OLD_FILES+=usr/share/examples/isdn/i4brunppp/Makefile
OLD_FILES+=usr/share/examples/isdn/i4brunppp/README
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp-isdnd.rc
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.8
OLD_FILES+=usr/share/examples/isdn/i4brunppp/i4brunppp.c
OLD_FILES+=usr/share/examples/isdn/v21/Makefile
OLD_FILES+=usr/share/examples/isdn/v21/README
OLD_FILES+=usr/share/examples/isdn/v21/v21modem.c
OLD_FILES+=usr/share/examples/isdn/FAQ
OLD_FILES+=usr/share/examples/isdn/KERNEL
OLD_FILES+=usr/share/examples/isdn/Overview
OLD_FILES+=usr/share/examples/isdn/README
OLD_FILES+=usr/share/examples/isdn/ROADMAP
OLD_FILES+=usr/share/examples/isdn/ReleaseNotes
OLD_FILES+=usr/share/examples/isdn/Resources
OLD_FILES+=usr/share/examples/isdn/SupportedCards
OLD_FILES+=usr/share/examples/isdn/ThankYou
OLD_DIRS+=usr/share/examples/isdn/contrib
OLD_DIRS+=usr/share/examples/isdn/i4brunppp
OLD_DIRS+=usr/share/examples/isdn/v21
OLD_DIRS+=usr/share/examples/isdn
OLD_FILES+=usr/share/examples/ppp/isdnd.rc
OLD_FILES+=usr/share/examples/ppp/ppp.conf.isdn
# 20080525: ng_atmpif removed
OLD_FILES+=usr/include/netgraph/atm/ng_atmpif.h
OLD_FILES+=usr/share/man/man4/ng_atmpif.4.gz
# 20080522: pmap_addr_hint removed
OLD_FILES+=usr/share/man/man9/pmap_addr_hint.9.gz
# 20080517: ipsec_osdep.h removed
OLD_FILES+=usr/include/netipsec/ipsec_osdep.h
# 20080507: heimdal 1.1 import
OLD_LIBS+=usr/lib/libasn1.so.9
OLD_LIBS+=usr/lib/libgssapi.so.9
OLD_LIBS+=usr/lib/libgssapi_krb5.so.9
OLD_LIBS+=usr/lib/libhdb.so.9
OLD_LIBS+=usr/lib/libkadm5clnt.so.9
OLD_LIBS+=usr/lib/libkadm5srv.so.9
OLD_LIBS+=usr/lib/libkafs5.so.9
OLD_LIBS+=usr/lib/libkrb5.so.9
OLD_LIBS+=usr/lib/libroken.so.9
OLD_LIBS+=usr/lib32/libgssapi.so.9
# 20080420: Symbol card support dropped
OLD_FILES+=usr/include/dev/wi/spectrum24t_cf.h
# 20080420: awi removal
OLD_FILES+=usr/share/man/man4/awi.4.gz
OLD_FILES+=usr/share/man/man4/if_awi.4.gz
# 20080331: pkg_sign has been removed
OLD_FILES+=usr/sbin/pkg_check
OLD_FILES+=usr/sbin/pkg_sign
OLD_FILES+=usr/share/man/man1/pkg_check.1.gz
OLD_FILES+=usr/share/man/man1/pkg_sign.1.gz
# 20080325: tzdata2008b import
OLD_FILES+=usr/share/zoneinfo/Asia/Calcutta
OLD_FILES+=usr/share/zoneinfo/Asia/Saigon
# 20080314: stack_print(9) mlink fixed
OLD_FILES+=usr/share/man/man9/stack_printf.9.gz
# 20080312: libkse removal
OLD_FILES+=usr/include/sys/kse.h
OLD_FILES+=usr/lib/libkse.so
OLD_LIBS+=usr/lib/libkse.so.3
OLD_FILES+=usr/share/man/man2/kse.2.gz
OLD_FILES+=usr/share/man/man2/kse_create.2.gz
OLD_FILES+=usr/share/man/man2/kse_exit.2.gz
OLD_FILES+=usr/share/man/man2/kse_release.2.gz
OLD_FILES+=usr/share/man/man2/kse_switchin.2.gz
OLD_FILES+=usr/share/man/man2/kse_thr_interrupt.2.gz
OLD_FILES+=usr/share/man/man2/kse_wakeup.2.gz
OLD_FILES+=usr/lib32/libkse.so
OLD_LIBS+=usr/lib32/libkse.so.3
# 20080225: bsdar/bsdranlib renamed to ar/ranlib
OLD_FILES+=usr/bin/bsdar
OLD_FILES+=usr/bin/bsdranlib
OLD_FILES+=usr/share/man/man1/bsdar.1.gz
OLD_FILES+=usr/share/man/man1/bsdranlib.1.gz
# 20080220: geom_lvm renamed to geom_linux_lvm
OLD_FILES+=usr/share/man/man4/geom_lvm.4.gz
# 20080126: oldcard.4 removal
OLD_FILES+=usr/share/man/man4/card.4.gz
OLD_FILES+=usr/share/man/man4/oldcard.4.gz
# 20080122: BUF_REFCNT(9) removed from the tree
OLD_FILES+=usr/share/man/man9/BUF_REFCNT.9.gz
# 20080108: shm_open(3) and shm_unlink(3) moved to section 2
OLD_FILES+=usr/share/man/man3/shm_open.3.gz
OLD_FILES+=usr/share/man/man3/shm_unlink.3.gz
# 20071207: Merged with fortunes-o.real
OLD_FILES+=usr/share/games/fortune/fortunes2-o
OLD_FILES+=usr/share/games/fortune/fortunes2-o.dat
# 20071201: Removal of XRPU driver
OLD_FILES+=usr/include/sys/xrpuio.h
# 20071129: Disabled static versions of libkse by default
OLD_FILES+=usr/lib/libkse.a
OLD_FILES+=usr/lib/libkse_p.a
OLD_FILES+=usr/lib/libkse_pic.a
OLD_FILES+=usr/lib32/libkse.a
OLD_FILES+=usr/lib32/libkse_p.a
OLD_FILES+=usr/lib32/libkse_pic.a
# 20071129: Removed a Solaris compatibility header
OLD_FILES+=usr/include/sys/_elf_solaris.h
# 20071125: Renamed to pmc_get_msr()
OLD_FILES+=usr/share/man/man3/pmc_x86_get_msr.3.gz
# 20071108: Removed very crufty OLDCARD support file
OLD_FILES+=etc/defaults/pccard.conf
# 20071025: rc.d/nfslocking superseded by rc.d/lockd and rc.d/statd
OLD_FILES+=etc/rc.d/nfslocking
# 20070930: rename of cached to nscd
OLD_FILES+=etc/cached.conf
OLD_FILES+=etc/rc.d/cached
OLD_FILES+=usr/sbin/cached
OLD_FILES+=usr/share/man/man5/cached.conf.5.gz
OLD_FILES+=usr/share/man/man8/cached.8.gz
# 20070807: removal of PowerPC-specific header file
.if ${TARGET_ARCH} == "powerpc"
OLD_FILES+=usr/include/machine/interruptvar.h
.endif
# 20070801: fast_ipsec.4 gone
OLD_FILES+=usr/share/man/man4/fast_ipsec.4.gz
# 20070715: netatm temporarily disconnected (removed 20080525)
OLD_FILES+=rescue/atm
OLD_FILES+=rescue/fore_dnld
OLD_FILES+=rescue/ilmid
OLD_FILES+=sbin/atm
OLD_FILES+=sbin/fore_dnld
OLD_FILES+=sbin/ilmid
OLD_FILES+=usr/include/libatm.h
OLD_FILES+=usr/include/netatm/atm.h
OLD_FILES+=usr/include/netatm/atm_cm.h
OLD_FILES+=usr/include/netatm/atm_if.h
OLD_FILES+=usr/include/netatm/atm_ioctl.h
OLD_FILES+=usr/include/netatm/atm_pcb.h
OLD_FILES+=usr/include/netatm/atm_sap.h
OLD_FILES+=usr/include/netatm/atm_sigmgr.h
OLD_FILES+=usr/include/netatm/atm_stack.h
OLD_FILES+=usr/include/netatm/atm_sys.h
OLD_FILES+=usr/include/netatm/atm_var.h
OLD_FILES+=usr/include/netatm/atm_vc.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm_serv.h
OLD_FILES+=usr/include/netatm/ipatm/ipatm_var.h
OLD_FILES+=usr/include/netatm/port.h
OLD_FILES+=usr/include/netatm/queue.h
OLD_FILES+=usr/include/netatm/sigpvc/sigpvc_var.h
OLD_FILES+=usr/include/netatm/spans/spans_cls.h
OLD_FILES+=usr/include/netatm/spans/spans_kxdr.h
OLD_FILES+=usr/include/netatm/spans/spans_var.h
OLD_FILES+=usr/include/netatm/uni/sscf_uni.h
OLD_FILES+=usr/include/netatm/uni/sscf_uni_var.h
OLD_FILES+=usr/include/netatm/uni/sscop.h
OLD_FILES+=usr/include/netatm/uni/sscop_misc.h
OLD_FILES+=usr/include/netatm/uni/sscop_pdu.h
OLD_FILES+=usr/include/netatm/uni/sscop_var.h
OLD_FILES+=usr/include/netatm/uni/uni.h
OLD_FILES+=usr/include/netatm/uni/uniip_var.h
OLD_FILES+=usr/include/netatm/uni/unisig.h
OLD_FILES+=usr/include/netatm/uni/unisig_decode.h
OLD_FILES+=usr/include/netatm/uni/unisig_mbuf.h
OLD_FILES+=usr/include/netatm/uni/unisig_msg.h
OLD_FILES+=usr/include/netatm/uni/unisig_print.h
OLD_FILES+=usr/include/netatm/uni/unisig_var.h
OLD_FILES+=usr/lib/libatm.a
OLD_FILES+=usr/lib/libatm_p.a
OLD_FILES+=usr/sbin/atmarpd
OLD_FILES+=usr/sbin/scspd
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atm.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/atmarpd.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/fore_dnld.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/ilmid.8.gz
OLD_FILES+=usr/share/man/en.ISO8859-1/man8/scspd.8.gz
OLD_FILES+=usr/share/man/man8/atm.8.gz
OLD_FILES+=usr/share/man/man8/atmarpd.8.gz
OLD_FILES+=usr/share/man/man8/fore_dnld.8.gz
OLD_FILES+=usr/share/man/man8/ilmid.8.gz
OLD_FILES+=usr/share/man/man8/scspd.8.gz
OLD_FILES+=usr/share/examples/atm/NOTES
OLD_FILES+=usr/share/examples/atm/README
OLD_FILES+=usr/share/examples/atm/Startup
OLD_FILES+=usr/share/examples/atm/atm-config.sh
OLD_FILES+=usr/share/examples/atm/atm-sockets.txt
OLD_FILES+=usr/share/examples/atm/cpcs-design.txt
OLD_FILES+=usr/share/examples/atm/fore-microcode.txt
OLD_FILES+=usr/share/examples/atm/sscf-design.txt
OLD_FILES+=usr/share/examples/atm/sscop-design.txt
OLD_LIBS+=lib/libatm.so.5
OLD_LIBS+=usr/lib/libatm.so
OLD_DIRS+=usr/include/netatm/sigpvc
OLD_DIRS+=usr/include/netatm/spans
OLD_DIRS+=usr/include/netatm/ipatm
OLD_DIRS+=usr/include/netatm/uni
OLD_DIRS+=usr/include/netatm
OLD_DIRS+=usr/share/examples/atm
OLD_FILES+=usr/lib32/libatm.a
OLD_FILES+=usr/lib32/libatm.so
OLD_LIBS+=usr/lib32/libatm.so.5
OLD_FILES+=usr/lib32/libatm_p.a
# 20070705: I4B headers repo-copied to include/i4b/
.if ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/machine/i4b_cause.h
OLD_FILES+=usr/include/machine/i4b_debug.h
OLD_FILES+=usr/include/machine/i4b_ioctl.h
OLD_FILES+=usr/include/machine/i4b_rbch_ioctl.h
OLD_FILES+=usr/include/machine/i4b_tel_ioctl.h
OLD_FILES+=usr/include/machine/i4b_trace.h
.endif
# 20070703: pf 4.1 import
OLD_FILES+=usr/libexec/ftp-proxy
# 20070701: KAME IPSec removal
OLD_FILES+=usr/include/netinet6/ah.h
OLD_FILES+=usr/include/netinet6/ah6.h
OLD_FILES+=usr/include/netinet6/ah_aesxcbcmac.h
OLD_FILES+=usr/include/netinet6/esp.h
OLD_FILES+=usr/include/netinet6/esp6.h
OLD_FILES+=usr/include/netinet6/esp_aesctr.h
OLD_FILES+=usr/include/netinet6/esp_camellia.h
OLD_FILES+=usr/include/netinet6/esp_rijndael.h
OLD_FILES+=usr/include/netinet6/ipsec.h
OLD_FILES+=usr/include/netinet6/ipsec6.h
OLD_FILES+=usr/include/netinet6/ipcomp.h
OLD_FILES+=usr/include/netinet6/ipcomp6.h
OLD_FILES+=usr/include/netkey/key.h
OLD_FILES+=usr/include/netkey/key_debug.h
OLD_FILES+=usr/include/netkey/key_var.h
OLD_FILES+=usr/include/netkey/keydb.h
OLD_FILES+=usr/include/netkey/keysock.h
OLD_DIRS+=usr/include/netkey
# 20070701: remove wicontrol
OLD_FILES+=usr/sbin/wicontrol
OLD_FILES+=usr/share/man/man8/wicontrol.8.gz
# 20070625: umapfs removal
OLD_FILES+=rescue/mount_umapfs
OLD_FILES+=sbin/mount_umapfs
OLD_FILES+=usr/include/fs/umapfs/umap.h
OLD_FILES+=usr/share/man/man8/mount_umapfs.8.gz
OLD_DIRS+=usr/include/fs/umapfs
# 20070618: Removal of the PROTO.localhost* files
OLD_FILES+=etc/namedb/PROTO.localhost-v6.rev
OLD_FILES+=etc/namedb/PROTO.localhost.rev
OLD_FILES+=etc/namedb/make-localhost
# 20070618: shared library version bump
OLD_LIBS+=lib/libalias.so.5
OLD_LIBS+=lib/libbsnmp.so.3
OLD_LIBS+=lib/libncurses.so.6
OLD_LIBS+=lib/libncursesw.so.6
OLD_LIBS+=lib/libreadline.so.6
OLD_LIBS+=usr/lib/libdialog.so.5
OLD_LIBS+=usr/lib/libgnuregex.so.3
OLD_LIBS+=usr/lib/libhistory.so.6
OLD_LIBS+=usr/lib/libpam.so.3
OLD_LIBS+=usr/lib/libssh.so.3
OLD_LIBS+=usr/lib/pam_chroot.so.3
OLD_LIBS+=usr/lib/pam_deny.so.3
OLD_LIBS+=usr/lib/pam_echo.so.3
OLD_LIBS+=usr/lib/pam_exec.so.3
OLD_LIBS+=usr/lib/pam_ftpusers.so.3
OLD_LIBS+=usr/lib/pam_group.so.3
OLD_LIBS+=usr/lib/pam_guest.so.3
OLD_LIBS+=usr/lib/pam_krb5.so.3
OLD_LIBS+=usr/lib/pam_ksu.so.3
OLD_LIBS+=usr/lib/pam_lastlog.so.3
OLD_LIBS+=usr/lib/pam_login_access.so.3
OLD_LIBS+=usr/lib/pam_nologin.so.3
OLD_LIBS+=usr/lib/pam_opie.so.3
OLD_LIBS+=usr/lib/pam_opieaccess.so.3
OLD_LIBS+=usr/lib/pam_passwdqc.so.3
OLD_LIBS+=usr/lib/pam_permit.so.3
OLD_LIBS+=usr/lib/pam_radius.so.3
OLD_LIBS+=usr/lib/pam_rhosts.so.3
OLD_LIBS+=usr/lib/pam_rootok.so.3
OLD_LIBS+=usr/lib/pam_securetty.so.3
OLD_LIBS+=usr/lib/pam_self.so.3
OLD_LIBS+=usr/lib/pam_ssh.so.3
OLD_LIBS+=usr/lib/pam_tacplus.so.3
OLD_LIBS+=usr/lib/pam_unix.so.3
OLD_LIBS+=usr/lib/snmp_atm.so.4
OLD_LIBS+=usr/lib/snmp_bridge.so.4
OLD_LIBS+=usr/lib/snmp_hostres.so.4
OLD_LIBS+=usr/lib/snmp_mibII.so.4
OLD_LIBS+=usr/lib/snmp_netgraph.so.4
OLD_LIBS+=usr/lib/snmp_pf.so.4
OLD_LIBS+=usr/lib32/libalias.so.5
OLD_LIBS+=usr/lib32/libbsnmp.so.3
OLD_LIBS+=usr/lib32/libdialog.so.5
OLD_LIBS+=usr/lib32/libgnuregex.so.3
OLD_LIBS+=usr/lib32/libhistory.so.6
OLD_LIBS+=usr/lib32/libncurses.so.6
OLD_LIBS+=usr/lib32/libncursesw.so.6
OLD_LIBS+=usr/lib32/libpam.so.3
OLD_LIBS+=usr/lib32/libreadline.so.6
OLD_LIBS+=usr/lib32/libssh.so.3
OLD_LIBS+=usr/lib32/pam_chroot.so.3
OLD_LIBS+=usr/lib32/pam_deny.so.3
OLD_LIBS+=usr/lib32/pam_echo.so.3
OLD_LIBS+=usr/lib32/pam_exec.so.3
OLD_LIBS+=usr/lib32/pam_ftpusers.so.3
OLD_LIBS+=usr/lib32/pam_group.so.3
OLD_LIBS+=usr/lib32/pam_guest.so.3
OLD_LIBS+=usr/lib32/pam_krb5.so.3
OLD_LIBS+=usr/lib32/pam_ksu.so.3
OLD_LIBS+=usr/lib32/pam_lastlog.so.3
OLD_LIBS+=usr/lib32/pam_login_access.so.3
OLD_LIBS+=usr/lib32/pam_nologin.so.3
OLD_LIBS+=usr/lib32/pam_opie.so.3
OLD_LIBS+=usr/lib32/pam_opieaccess.so.3
OLD_LIBS+=usr/lib32/pam_passwdqc.so.3
OLD_LIBS+=usr/lib32/pam_permit.so.3
OLD_LIBS+=usr/lib32/pam_radius.so.3
OLD_LIBS+=usr/lib32/pam_rhosts.so.3
OLD_LIBS+=usr/lib32/pam_rootok.so.3
OLD_LIBS+=usr/lib32/pam_securetty.so.3
OLD_LIBS+=usr/lib32/pam_self.so.3
OLD_LIBS+=usr/lib32/pam_ssh.so.3
OLD_LIBS+=usr/lib32/pam_tacplus.so.3
OLD_LIBS+=usr/lib32/pam_unix.so.3
# 20070613: IPX over IP tunnel removal
OLD_FILES+=usr/include/netipx/ipx_ip.h
# 20070605: sched_core removal
OLD_FILES+=usr/share/man/man4/sched_core.4.gz
# 20070603: BIND 9.4.1 import
OLD_LIBS+=usr/lib/liblwres.so.10
# 20070521: shared library version bump
OLD_LIBS+=lib/libatm.so.4
OLD_LIBS+=lib/libbegemot.so.2
OLD_LIBS+=lib/libbsdxml.so.2
OLD_LIBS+=lib/libcam.so.3
OLD_LIBS+=lib/libcrypt.so.3
OLD_LIBS+=lib/libdevstat.so.5
OLD_LIBS+=lib/libedit.so.5
OLD_LIBS+=lib/libgeom.so.3
OLD_LIBS+=lib/libipsec.so.2
OLD_LIBS+=lib/libipx.so.3
OLD_LIBS+=lib/libkiconv.so.2
OLD_LIBS+=lib/libkse.so.2
OLD_LIBS+=lib/libkvm.so.3
OLD_LIBS+=lib/libm.so.4
OLD_LIBS+=lib/libmd.so.3
OLD_LIBS+=lib/libpcap.so.4
OLD_LIBS+=lib/libpthread.so.2
OLD_LIBS+=lib/libsbuf.so.3
OLD_LIBS+=lib/libthr.so.2
OLD_LIBS+=lib/libufs.so.3
OLD_LIBS+=lib/libutil.so.6
OLD_LIBS+=lib/libz.so.3
OLD_LIBS+=usr/lib/libbluetooth.so.2
OLD_LIBS+=usr/lib/libbsm.so.1
OLD_LIBS+=usr/lib/libbz2.so.2
OLD_LIBS+=usr/lib/libcalendar.so.3
OLD_LIBS+=usr/lib/libcom_err.so.3
OLD_LIBS+=usr/lib/libdevinfo.so.3
OLD_LIBS+=usr/lib/libfetch.so.4
OLD_LIBS+=usr/lib/libform.so.3
OLD_LIBS+=usr/lib/libformw.so.3
OLD_LIBS+=usr/lib/libftpio.so.6
OLD_LIBS+=usr/lib/libgpib.so.1
OLD_LIBS+=usr/lib/libkse.so.2
OLD_LIBS+=usr/lib/libmagic.so.2
OLD_LIBS+=usr/lib/libmemstat.so.1
OLD_LIBS+=usr/lib/libmenu.so.3
OLD_LIBS+=usr/lib/libmenuw.so.3
OLD_LIBS+=usr/lib/libmilter.so.3
OLD_LIBS+=usr/lib/libmp.so.5
OLD_LIBS+=usr/lib/libncp.so.2
OLD_LIBS+=usr/lib/libnetgraph.so.2
OLD_LIBS+=usr/lib/libngatm.so.2
OLD_LIBS+=usr/lib/libopie.so.4
OLD_LIBS+=usr/lib/libpanel.so.3
OLD_LIBS+=usr/lib/libpanelw.so.3
OLD_LIBS+=usr/lib/libpmc.so.3
OLD_LIBS+=usr/lib/libradius.so.2
OLD_LIBS+=usr/lib/librpcsvc.so.3
OLD_LIBS+=usr/lib/libsdp.so.2
OLD_LIBS+=usr/lib/libsmb.so.2
OLD_LIBS+=usr/lib/libstdc++.so.5
OLD_LIBS+=usr/lib/libtacplus.so.2
OLD_LIBS+=usr/lib/libthr.so.2
OLD_LIBS+=usr/lib/libthread_db.so.2
OLD_LIBS+=usr/lib/libugidfw.so.2
OLD_LIBS+=usr/lib/libusbhid.so.2
OLD_LIBS+=usr/lib/libvgl.so.4
OLD_LIBS+=usr/lib/libwrap.so.4
OLD_LIBS+=usr/lib/libypclnt.so.2
OLD_LIBS+=usr/lib/snmp_bridge.so.3
OLD_LIBS+=usr/lib/snmp_hostres.so.3
OLD_LIBS+=usr/lib32/libatm.so.4
OLD_LIBS+=usr/lib32/libbegemot.so.2
OLD_LIBS+=usr/lib32/libbluetooth.so.2
OLD_LIBS+=usr/lib32/libbsdxml.so.2
OLD_LIBS+=usr/lib32/libbsm.so.1
OLD_LIBS+=usr/lib32/libbz2.so.2
OLD_LIBS+=usr/lib32/libcalendar.so.3
OLD_LIBS+=usr/lib32/libcam.so.3
OLD_LIBS+=usr/lib32/libcom_err.so.3
OLD_LIBS+=usr/lib32/libcrypt.so.3
OLD_LIBS+=usr/lib32/libdevinfo.so.3
OLD_LIBS+=usr/lib32/libdevstat.so.5
OLD_LIBS+=usr/lib32/libedit.so.5
OLD_LIBS+=usr/lib32/libfetch.so.4
OLD_LIBS+=usr/lib32/libform.so.3
OLD_LIBS+=usr/lib32/libformw.so.3
OLD_LIBS+=usr/lib32/libftpio.so.6
OLD_LIBS+=usr/lib32/libgeom.so.3
OLD_LIBS+=usr/lib32/libgpib.so.1
OLD_LIBS+=usr/lib32/libipsec.so.2
OLD_LIBS+=usr/lib32/libipx.so.3
OLD_LIBS+=usr/lib32/libkiconv.so.2
OLD_LIBS+=usr/lib32/libkse.so.2
OLD_LIBS+=usr/lib32/libkvm.so.3
OLD_LIBS+=usr/lib32/libm.so.4
OLD_LIBS+=usr/lib32/libmagic.so.2
OLD_LIBS+=usr/lib32/libmd.so.3
OLD_LIBS+=usr/lib32/libmemstat.so.1
OLD_LIBS+=usr/lib32/libmenu.so.3
OLD_LIBS+=usr/lib32/libmenuw.so.3
OLD_LIBS+=usr/lib32/libmilter.so.3
OLD_LIBS+=usr/lib32/libmp.so.5
OLD_LIBS+=usr/lib32/libncp.so.2
OLD_LIBS+=usr/lib32/libnetgraph.so.2
OLD_LIBS+=usr/lib32/libngatm.so.2
OLD_LIBS+=usr/lib32/libopie.so.4
OLD_LIBS+=usr/lib32/libpanel.so.3
OLD_LIBS+=usr/lib32/libpanelw.so.3
OLD_LIBS+=usr/lib32/libpcap.so.4
OLD_LIBS+=usr/lib32/libpmc.so.3
OLD_LIBS+=usr/lib32/libpthread.so.2
OLD_LIBS+=usr/lib32/libradius.so.2
OLD_LIBS+=usr/lib32/librpcsvc.so.3
OLD_LIBS+=usr/lib32/libsbuf.so.3
OLD_LIBS+=usr/lib32/libsdp.so.2
OLD_LIBS+=usr/lib32/libsmb.so.2
OLD_LIBS+=usr/lib32/libstdc++.so.5
OLD_LIBS+=usr/lib32/libtacplus.so.2
OLD_LIBS+=usr/lib32/libthr.so.2
OLD_LIBS+=usr/lib32/libthread_db.so.2
OLD_LIBS+=usr/lib32/libufs.so.3
OLD_LIBS+=usr/lib32/libugidfw.so.2
OLD_LIBS+=usr/lib32/libusbhid.so.2
OLD_LIBS+=usr/lib32/libutil.so.6
OLD_LIBS+=usr/lib32/libvgl.so.4
OLD_LIBS+=usr/lib32/libwrap.so.4
OLD_LIBS+=usr/lib32/libypclnt.so.2
OLD_LIBS+=usr/lib32/libz.so.3
# 20070519: GCC 4.2
OLD_FILES+=usr/bin/f77
OLD_FILES+=usr/bin/protoize
OLD_FILES+=usr/include/g2c.h
OLD_FILES+=usr/libexec/f771
OLD_FILES+=usr/share/info/g77.info.gz
OLD_FILES+=usr/share/man/man1/f77.1.gz
OLD_FILES+=usr/include/c++/3.4/algorithm
OLD_FILES+=usr/include/c++/3.4/backward/algo.h
OLD_FILES+=usr/include/c++/3.4/backward/algobase.h
OLD_FILES+=usr/include/c++/3.4/backward/alloc.h
OLD_FILES+=usr/include/c++/3.4/backward/backward_warning.h
OLD_FILES+=usr/include/c++/3.4/backward/bvector.h
OLD_FILES+=usr/include/c++/3.4/backward/complex.h
OLD_FILES+=usr/include/c++/3.4/backward/defalloc.h
OLD_FILES+=usr/include/c++/3.4/backward/deque.h
OLD_FILES+=usr/include/c++/3.4/backward/fstream.h
OLD_FILES+=usr/include/c++/3.4/backward/function.h
OLD_FILES+=usr/include/c++/3.4/backward/hash_map.h
OLD_FILES+=usr/include/c++/3.4/backward/hash_set.h
OLD_FILES+=usr/include/c++/3.4/backward/hashtable.h
OLD_FILES+=usr/include/c++/3.4/backward/heap.h
OLD_FILES+=usr/include/c++/3.4/backward/iomanip.h
OLD_FILES+=usr/include/c++/3.4/backward/iostream.h
OLD_FILES+=usr/include/c++/3.4/backward/istream.h
OLD_FILES+=usr/include/c++/3.4/backward/iterator.h
OLD_FILES+=usr/include/c++/3.4/backward/list.h
OLD_FILES+=usr/include/c++/3.4/backward/map.h
OLD_FILES+=usr/include/c++/3.4/backward/multimap.h
OLD_FILES+=usr/include/c++/3.4/backward/multiset.h
OLD_FILES+=usr/include/c++/3.4/backward/new.h
OLD_FILES+=usr/include/c++/3.4/backward/ostream.h
OLD_FILES+=usr/include/c++/3.4/backward/pair.h
OLD_FILES+=usr/include/c++/3.4/backward/queue.h
OLD_FILES+=usr/include/c++/3.4/backward/rope.h
OLD_FILES+=usr/include/c++/3.4/backward/set.h
OLD_FILES+=usr/include/c++/3.4/backward/slist.h
OLD_FILES+=usr/include/c++/3.4/backward/stack.h
OLD_FILES+=usr/include/c++/3.4/backward/stream.h
OLD_FILES+=usr/include/c++/3.4/backward/streambuf.h
OLD_FILES+=usr/include/c++/3.4/backward/strstream
OLD_FILES+=usr/include/c++/3.4/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.4/backward/tree.h
OLD_FILES+=usr/include/c++/3.4/backward/vector.h
OLD_FILES+=usr/include/c++/3.4/bits/allocator.h
OLD_FILES+=usr/include/c++/3.4/bits/atomic_word.h
OLD_FILES+=usr/include/c++/3.4/bits/atomicity.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_file.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/3.4/bits/basic_string.h
OLD_FILES+=usr/include/c++/3.4/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/3.4/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/3.4/bits/c++allocator.h
OLD_FILES+=usr/include/c++/3.4/bits/c++config.h
OLD_FILES+=usr/include/c++/3.4/bits/c++io.h
OLD_FILES+=usr/include/c++/3.4/bits/c++locale.h
OLD_FILES+=usr/include/c++/3.4/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/3.4/bits/char_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/cmath.tcc
OLD_FILES+=usr/include/c++/3.4/bits/codecvt.h
OLD_FILES+=usr/include/c++/3.4/bits/codecvt_specializations.h
OLD_FILES+=usr/include/c++/3.4/bits/concept_check.h
OLD_FILES+=usr/include/c++/3.4/bits/concurrence.h
OLD_FILES+=usr/include/c++/3.4/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_base.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/3.4/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/3.4/bits/deque.tcc
OLD_FILES+=usr/include/c++/3.4/bits/fstream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/functexcept.h
OLD_FILES+=usr/include/c++/3.4/bits/gslice.h
OLD_FILES+=usr/include/c++/3.4/bits/gslice_array.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-default.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr-single.h
OLD_FILES+=usr/include/c++/3.4/bits/gthr.h
OLD_FILES+=usr/include/c++/3.4/bits/indirect_array.h
OLD_FILES+=usr/include/c++/3.4/bits/ios_base.h
OLD_FILES+=usr/include/c++/3.4/bits/istream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/list.tcc
OLD_FILES+=usr/include/c++/3.4/bits/locale_classes.h
OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.h
OLD_FILES+=usr/include/c++/3.4/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/3.4/bits/localefwd.h
OLD_FILES+=usr/include/c++/3.4/bits/mask_array.h
OLD_FILES+=usr/include/c++/3.4/bits/messages_members.h
OLD_FILES+=usr/include/c++/3.4/bits/os_defines.h
OLD_FILES+=usr/include/c++/3.4/bits/ostream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/postypes.h
OLD_FILES+=usr/include/c++/3.4/bits/slice_array.h
OLD_FILES+=usr/include/c++/3.4/bits/sstream.tcc
OLD_FILES+=usr/include/c++/3.4/bits/stl_algo.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_construct.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_deque.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_function.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_heap.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_list.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_map.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_pair.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_queue.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_relops.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_set.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_stack.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_threads.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_tree.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/3.4/bits/stl_vector.h
OLD_FILES+=usr/include/c++/3.4/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/3.4/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/3.4/bits/stringfwd.h
OLD_FILES+=usr/include/c++/3.4/bits/time_members.h
OLD_FILES+=usr/include/c++/3.4/bits/type_traits.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_after.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.h
OLD_FILES+=usr/include/c++/3.4/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/3.4/bits/valarray_before.h
OLD_FILES+=usr/include/c++/3.4/bits/vector.tcc
OLD_FILES+=usr/include/c++/3.4/bitset
OLD_FILES+=usr/include/c++/3.4/cassert
OLD_FILES+=usr/include/c++/3.4/cctype
OLD_FILES+=usr/include/c++/3.4/cerrno
OLD_FILES+=usr/include/c++/3.4/cfloat
OLD_FILES+=usr/include/c++/3.4/ciso646
OLD_FILES+=usr/include/c++/3.4/climits
OLD_FILES+=usr/include/c++/3.4/clocale
OLD_FILES+=usr/include/c++/3.4/cmath
OLD_FILES+=usr/include/c++/3.4/complex
OLD_FILES+=usr/include/c++/3.4/csetjmp
OLD_FILES+=usr/include/c++/3.4/csignal
OLD_FILES+=usr/include/c++/3.4/cstdarg
OLD_FILES+=usr/include/c++/3.4/cstddef
OLD_FILES+=usr/include/c++/3.4/cstdio
OLD_FILES+=usr/include/c++/3.4/cstdlib
OLD_FILES+=usr/include/c++/3.4/cstring
OLD_FILES+=usr/include/c++/3.4/ctime
OLD_FILES+=usr/include/c++/3.4/cwchar
OLD_FILES+=usr/include/c++/3.4/cwctype
OLD_FILES+=usr/include/c++/3.4/cxxabi.h
OLD_FILES+=usr/include/c++/3.4/debug/bitset
OLD_FILES+=usr/include/c++/3.4/debug/debug.h
OLD_FILES+=usr/include/c++/3.4/debug/deque
OLD_FILES+=usr/include/c++/3.4/debug/formatter.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_map
OLD_FILES+=usr/include/c++/3.4/debug/hash_map.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_multimap.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_multiset.h
OLD_FILES+=usr/include/c++/3.4/debug/hash_set
OLD_FILES+=usr/include/c++/3.4/debug/hash_set.h
OLD_FILES+=usr/include/c++/3.4/debug/list
OLD_FILES+=usr/include/c++/3.4/debug/map
OLD_FILES+=usr/include/c++/3.4/debug/map.h
OLD_FILES+=usr/include/c++/3.4/debug/multimap.h
OLD_FILES+=usr/include/c++/3.4/debug/multiset.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_base.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.h
OLD_FILES+=usr/include/c++/3.4/debug/safe_iterator.tcc
OLD_FILES+=usr/include/c++/3.4/debug/safe_sequence.h
OLD_FILES+=usr/include/c++/3.4/debug/set
OLD_FILES+=usr/include/c++/3.4/debug/set.h
OLD_FILES+=usr/include/c++/3.4/debug/string
OLD_FILES+=usr/include/c++/3.4/debug/vector
OLD_FILES+=usr/include/c++/3.4/deque
OLD_FILES+=usr/include/c++/3.4/exception
OLD_FILES+=usr/include/c++/3.4/exception_defines.h
OLD_FILES+=usr/include/c++/3.4/ext/algorithm
OLD_FILES+=usr/include/c++/3.4/ext/bitmap_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/debug_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/enc_filebuf.h
OLD_FILES+=usr/include/c++/3.4/ext/functional
OLD_FILES+=usr/include/c++/3.4/ext/hash_fun.h
OLD_FILES+=usr/include/c++/3.4/ext/hash_map
OLD_FILES+=usr/include/c++/3.4/ext/hash_set
OLD_FILES+=usr/include/c++/3.4/ext/hashtable.h
OLD_FILES+=usr/include/c++/3.4/ext/iterator
OLD_FILES+=usr/include/c++/3.4/ext/malloc_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/memory
OLD_FILES+=usr/include/c++/3.4/ext/mt_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/new_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/numeric
OLD_FILES+=usr/include/c++/3.4/ext/pod_char_traits.h
OLD_FILES+=usr/include/c++/3.4/ext/pool_allocator.h
OLD_FILES+=usr/include/c++/3.4/ext/rb_tree
OLD_FILES+=usr/include/c++/3.4/ext/rope
OLD_FILES+=usr/include/c++/3.4/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/3.4/ext/slist
OLD_FILES+=usr/include/c++/3.4/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/3.4/ext/stdio_sync_filebuf.h
OLD_FILES+=usr/include/c++/3.4/fstream
OLD_FILES+=usr/include/c++/3.4/functional
OLD_FILES+=usr/include/c++/3.4/iomanip
OLD_FILES+=usr/include/c++/3.4/ios
OLD_FILES+=usr/include/c++/3.4/iosfwd
OLD_FILES+=usr/include/c++/3.4/iostream
OLD_FILES+=usr/include/c++/3.4/istream
OLD_FILES+=usr/include/c++/3.4/iterator
OLD_FILES+=usr/include/c++/3.4/limits
OLD_FILES+=usr/include/c++/3.4/list
OLD_FILES+=usr/include/c++/3.4/locale
OLD_FILES+=usr/include/c++/3.4/map
OLD_FILES+=usr/include/c++/3.4/memory
OLD_FILES+=usr/include/c++/3.4/new
OLD_FILES+=usr/include/c++/3.4/numeric
OLD_FILES+=usr/include/c++/3.4/ostream
OLD_FILES+=usr/include/c++/3.4/queue
OLD_FILES+=usr/include/c++/3.4/set
OLD_FILES+=usr/include/c++/3.4/sstream
OLD_FILES+=usr/include/c++/3.4/stack
OLD_FILES+=usr/include/c++/3.4/stdexcept
OLD_FILES+=usr/include/c++/3.4/streambuf
OLD_FILES+=usr/include/c++/3.4/string
OLD_FILES+=usr/include/c++/3.4/typeinfo
OLD_FILES+=usr/include/c++/3.4/utility
OLD_FILES+=usr/include/c++/3.4/valarray
OLD_FILES+=usr/include/c++/3.4/vector
OLD_DIRS+=usr/include/c++/3.4/backward
OLD_DIRS+=usr/include/c++/3.4/bits
OLD_DIRS+=usr/include/c++/3.4/debug
OLD_DIRS+=usr/include/c++/3.4/ext
OLD_DIRS+=usr/include/c++/3.4
# 20070510: zpool/zfs moved to /sbin
OLD_FILES+=usr/sbin/zfs
OLD_FILES+=usr/sbin/zpool
# 20070423: rc.bluetooth (examples) removed
OLD_FILES+=usr/share/examples/netgraph/bluetooth/rc.bluetooth
OLD_DIRS+=usr/share/examples/netgraph/bluetooth
# 20070421: worm.4 removed
OLD_FILES+=usr/share/man/man4/worm.4.gz
# 20070417: trunk(4) renamed to lagg(4)
OLD_FILES+=usr/include/net/if_trunk.h
# 20070409: uuidgen moved to /bin/
OLD_FILES+=usr/bin/uuidgen
# 20070328: bzip2 1.0.4
OLD_FILES+=usr/share/info/bzip2.info.gz
# 20070303: libarchive 2.0
OLD_LIBS+=usr/lib/libarchive.so.3
OLD_LIBS+=usr/lib32/libarchive.so.3
# 20070301: remove addr2ascii and ascii2addr
OLD_FILES+=usr/share/man/man3/addr2ascii.3.gz
OLD_FILES+=usr/share/man/man3/ascii2addr.3.gz
# 20070225: vm_page_unmanage() removed
OLD_FILES+=usr/share/man/man9/vm_page_unmanage.9.gz
# 20070216: VFS_VPTOFH(9) -> VOP_VPTOFH(9)
OLD_FILES+=usr/share/man/man9/VFS_VPTOFH.9.gz
# 20070212: kame.4 removed
OLD_FILES+=usr/share/man/man4/kame.4.gz
# 20070201: remove libmytinfo link
OLD_FILES+=usr/lib/libmytinfo.a
OLD_FILES+=usr/lib/libmytinfo.so
OLD_FILES+=usr/lib/libmytinfo_p.a
OLD_FILES+=usr/lib/libmytinfow.a
OLD_FILES+=usr/lib/libmytinfow.so
OLD_FILES+=usr/lib/libmytinfow_p.a
OLD_FILES+=usr/lib32/libmytinfo.a
OLD_FILES+=usr/lib32/libmytinfo.so
OLD_FILES+=usr/lib32/libmytinfo_p.a
OLD_FILES+=usr/lib32/libmytinfow.a
OLD_FILES+=usr/lib32/libmytinfow.so
OLD_FILES+=usr/lib32/libmytinfow_p.a
# 20070128: remove vnconfig
OLD_FILES+=usr/sbin/vnconfig
# 20070127: remove bpf_compat.h
OLD_FILES+=usr/include/net/bpf_compat.h
# 20070125: objformat bites the dust
OLD_FILES+=usr/bin/objformat
OLD_FILES+=usr/share/man/man1/objformat.1.gz
OLD_FILES+=usr/include/objformat.h
OLD_FILES+=usr/share/man/man3/getobjformat.3.gz
# 20061201: remove symlink to *.so.4 libalias modules
OLD_FILES+=usr/lib/libalias_cuseeme.so
OLD_FILES+=usr/lib/libalias_dummy.so
OLD_FILES+=usr/lib/libalias_ftp.so
OLD_FILES+=usr/lib/libalias_irc.so
OLD_FILES+=usr/lib/libalias_nbt.so
OLD_FILES+=usr/lib/libalias_pptp.so
OLD_FILES+=usr/lib/libalias_skinny.so
OLD_FILES+=usr/lib/libalias_smedia.so
# 20061201: remove old *.so.4 libalias modules
OLD_FILES+=lib/libalias_cuseeme.so.4
OLD_FILES+=lib/libalias_dummy.so.4
OLD_FILES+=lib/libalias_ftp.so.4
OLD_FILES+=lib/libalias_irc.so.4
OLD_FILES+=lib/libalias_nbt.so.4
OLD_FILES+=lib/libalias_pptp.so.4
OLD_FILES+=lib/libalias_skinny.so.4
OLD_FILES+=lib/libalias_smedia.so.4
# 20061126: remove old man page
OLD_FILES+=usr/share/man/man3/archive_read_set_bytes_per_block.3.gz
# 20061125: remove old man page
OLD_FILES+=usr/share/man/man9/devsw.9.gz
# 20061122: remove obsolete mount programs
OLD_FILES+=sbin/mount_devfs
OLD_FILES+=sbin/mount_ext2fs
OLD_FILES+=sbin/mount_fdescfs
OLD_FILES+=sbin/mount_linprocfs
OLD_FILES+=sbin/mount_procfs
OLD_FILES+=sbin/mount_std
OLD_FILES+=rescue/mount_devfs
OLD_FILES+=rescue/mount_ext2fs
OLD_FILES+=rescue/mount_fdescfs
OLD_FILES+=rescue/mount_linprocfs
OLD_FILES+=rescue/mount_procfs
OLD_FILES+=rescue/mount_std
OLD_FILES+=usr/share/man/man8/mount_devfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_ext2fs.8.gz
OLD_FILES+=usr/share/man/man8/mount_fdescfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_linprocfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_procfs.8.gz
OLD_FILES+=usr/share/man/man8/mount_std.8.gz
# 20061116: uhidev.4 removed
OLD_FILES+=usr/share/man/man4/uhidev.4.gz
# 20061106: archive_write_prepare.3 removed
OLD_FILES+=usr/share/man/man3/archive_write_prepare.3.gz
# 20061018: pccardc removed
OLD_FILES+=usr/sbin/pccardc
OLD_FILES+=usr/share/man/man8/pccardc.8.gz
# 20060930: demangle.h removed from contrib/libstdc++/include/ext/
OLD_FILES+=usr/include/c++/3.4/ext/demangle.h
# 20060929: mrouted removed
OLD_FILES+=usr/sbin/map-mbone
OLD_FILES+=usr/sbin/mrinfo
OLD_FILES+=usr/sbin/mrouted
OLD_FILES+=usr/sbin/mtrace
OLD_FILES+=usr/share/man/man8/map-mbone.8.gz
OLD_FILES+=usr/share/man/man8/mrinfo.8.gz
OLD_FILES+=usr/share/man/man8/mrouted.8.gz
OLD_FILES+=usr/share/man/man8/mtrace.8.gz
# 20060924: tcpslice removed
OLD_FILES+=usr/sbin/tcpslice
OLD_FILES+=usr/share/man/man1/tcpslice.1.gz
# 20060829: kvmdb cleanup script removed
OLD_FILES+=etc/periodic/weekly/120.clean-kvmdb
# 20060822: ramdisk{,-own} have been replaced by mdconfig{,2}
OLD_FILES+=etc/rc.d/ramdisk
OLD_FILES+=etc/rc.d/ramdisk-own
# 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade
OLD_FILES+=usr/include/openssl/eng_int.h
OLD_FILES+=usr/include/openssl/hw_4758_cca_err.h
OLD_FILES+=usr/include/openssl/hw_aep_err.h
OLD_FILES+=usr/include/openssl/hw_atalla_err.h
OLD_FILES+=usr/include/openssl/hw_cswift_err.h
OLD_FILES+=usr/include/openssl/hw_ncipher_err.h
OLD_FILES+=usr/include/openssl/hw_nuron_err.h
OLD_FILES+=usr/include/openssl/hw_sureware_err.h
OLD_FILES+=usr/include/openssl/hw_ubsec_err.h
# 20060713: mount_linsysfs(8) never existed in 7.x
OLD_FILES+=sbin/mount_linsysfs
OLD_FILES+=usr/share/man/man8/mount_linsysfs.8.gz
# 20060704: KAME compat file net_osdep.h removed
OLD_FILES+=usr/include/net/net_osdep.h
# 20060605: man page links removed by OpenBSM 1.0 alpha 6 import
OLD_FILES+=usr/share/man/man3/au_to_socket.3.gz
OLD_FILES+=usr/share/man/man3/au_to_socket_ex_128.3.gz
OLD_FILES+=usr/share/man/man3/au_to_socket_ex_32.3.gz
# 20060517: pcvt removed
OLD_FILES+=usr/share/pcvt/README.FIRST
OLD_FILES+=usr/share/pcvt/Etc/xmodmap-german
OLD_FILES+=usr/share/pcvt/Etc/pcvt.sh
OLD_FILES+=usr/share/pcvt/Etc/pcvt.el
OLD_FILES+=usr/share/pcvt/Etc/Terminfo
OLD_FILES+=usr/share/pcvt/Etc/Termcap
OLD_DIRS+=usr/share/pcvt/Etc
OLD_FILES+=usr/share/pcvt/Doc/NotesAndHints
OLD_FILES+=usr/share/pcvt/Doc/Keyboard.VT
OLD_FILES+=usr/share/pcvt/Doc/Keyboard.HP
OLD_FILES+=usr/share/pcvt/Doc/EscapeSequences
OLD_FILES+=usr/share/pcvt/Doc/Charsets
OLD_FILES+=usr/share/pcvt/Doc/CharGen
OLD_FILES+=usr/share/pcvt/Doc/Bibliography
OLD_FILES+=usr/share/pcvt/Doc/Acknowledgements
OLD_DIRS+=usr/share/pcvt/Doc
OLD_DIRS+=usr/share/pcvt
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.816
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.814
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.810
OLD_FILES+=usr/share/misc/pcvtfonts/vt220l.808
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.816
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.814
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.810
OLD_FILES+=usr/share/misc/pcvtfonts/vt220h.808
OLD_DIRS+=usr/share/misc/pcvtfonts
OLD_FILES+=usr/share/misc/keycap.pcvt
OLD_FILES+=usr/share/man/man8/ispcvt.8.gz
OLD_FILES+=usr/share/man/man5/keycap.5.gz
OLD_FILES+=usr/share/man/man4/pcvt.4.gz
OLD_FILES+=usr/share/man/man3/kgetstr.3.gz
OLD_FILES+=usr/share/man/man3/kgetnum.3.gz
OLD_FILES+=usr/share/man/man3/kgetflag.3.gz
OLD_FILES+=usr/share/man/man3/kgetent.3.gz
OLD_FILES+=usr/share/man/man3/keycap.3.gz
OLD_FILES+=usr/share/man/man1/vt220keys.1.gz
OLD_FILES+=usr/share/man/man1/scon.1.gz
OLD_FILES+=usr/share/man/man1/loadfont.1.gz
OLD_FILES+=usr/share/man/man1/kcon.1.gz
OLD_FILES+=usr/share/man/man1/fontedit.1.gz
OLD_FILES+=usr/share/man/man1/cursor.1.gz
OLD_FILES+=usr/sbin/vt220keys
OLD_FILES+=usr/sbin/scon
OLD_FILES+=usr/sbin/loadfont
OLD_FILES+=usr/sbin/kcon
OLD_FILES+=usr/sbin/ispcvt
OLD_FILES+=usr/sbin/fontedit
OLD_FILES+=usr/sbin/cursor
OLD_FILES+=usr/lib/libkeycap_p.a
OLD_FILES+=usr/lib/libkeycap.a
OLD_FILES+=usr/include/machine/pcvt_ioctl.h
# 20060514: lnc(4) replaced by le(4)
OLD_FILES+=usr/share/man/man4/i386/lnc.4.gz
# 20060512: remove ip6fw
OLD_FILES+=etc/periodic/security/600.ip6fwdenied
OLD_FILES+=etc/periodic/security/650.ip6fwlimit
OLD_FILES+=sbin/ip6fw
OLD_FILES+=usr/include/netinet6/ip6_fw.h
OLD_FILES+=usr/share/man/man8/ip6fw.8.gz
# 20060424: sab(4) removed
OLD_FILES+=usr/share/man/man4/sab.4.gz
# 20060328: remove redundant rc.d script
OLD_FILES+=etc/rc.d/ike
# 20060127: revert libdisk to static-only
OLD_FILES+=usr/lib/libdisk.so
# 20060115: sys/pccard includes cleanup
OLD_FILES+=usr/include/pccard/driver.h
OLD_FILES+=usr/include/pccard/i82365.h
OLD_FILES+=usr/include/pccard/meciareg.h
OLD_FILES+=usr/include/pccard/pccard_nbk.h
OLD_FILES+=usr/include/pccard/pcic_pci.h
OLD_FILES+=usr/include/pccard/pcicvar.h
OLD_FILES+=usr/include/pccard/slot.h
# 20051215: rescue/nextboot.sh renamed to rescue/nextboot
OLD_FILES+=rescue/nextboot.sh
# 20051214: usbd(8) removed
OLD_FILES+=etc/rc.d/usbd
OLD_FILES+=etc/usbd.conf
OLD_FILES+=usr/sbin/usbd
OLD_FILES+=usr/share/man/man8/usbd.8.gz
# 20051029: rc.d/ppp-user renamed to rc.d/ppp for convenience
OLD_FILES+=etc/rc.d/ppp-user
# 20051012: setkey(8) moved to /sbin/
OLD_FILES+=usr/sbin/setkey
# 20050930: pccardd(8) removed
OLD_FILES+=usr/sbin/pccardd
OLD_FILES+=usr/share/man/man5/pccard.conf.5.gz
OLD_FILES+=usr/share/man/man8/pccardd.8.gz
# 20050927: bridge(4) replaced by if_bridge(4)
OLD_FILES+=usr/include/net/bridge.h
# 20050831: getino(3)/putino(3) never implemented
OLD_FILES+=usr/share/man/man3/getino.3.gz
OLD_FILES+=usr/share/man/man3/putino.3.gz
# 20050825: T/TCP retired several months ago
OLD_FILES+=usr/share/man/man4/ttcp.4.gz
# 20050805: tn3270 retired long ago
OLD_FILES+=usr/share/misc/map3270
# 20050801: too old to be interesting here
OLD_FILES+=usr/share/doc/papers/px.ps.gz
# 20050721: moved to ports
OLD_FILES+=usr/sbin/vttest
OLD_FILES+=usr/share/man/man1/vttest.1.gz
# 20050617: wpa man pages moved to section 8
OLD_FILES+=usr/share/man/man1/hostapd.1.gz
OLD_FILES+=usr/share/man/man1/hostapd_cli.1.gz
OLD_FILES+=usr/share/man/man1/wpa_cli.1.gz
OLD_FILES+=usr/share/man/man1/wpa_supplicant.1.gz
# 20050610: rexecd removed (insecure by design)
OLD_FILES+=etc/pam.d/rexecd
OLD_FILES+=usr/share/man/man8/rexecd.8.gz
OLD_FILES+=usr/libexec/rexecd
# 20050606: OpenBSD dhclient replaces ISC one
OLD_FILES+=bin/omshell
OLD_FILES+=sbin/omshell
OLD_FILES+=usr/share/man/man1/omshell.1.gz
OLD_FILES+=usr/share/man/man5/dhcp-eval.5.gz
# 200504XX: ipf tools moved from /usr to /
OLD_FILES+=rescue/ipfs
OLD_FILES+=rescue/ipfstat
OLD_FILES+=rescue/ipmon
OLD_FILES+=rescue/ipnat
OLD_FILES+=usr/sbin/ipftest
OLD_FILES+=usr/sbin/ipresend
OLD_FILES+=usr/sbin/ipsend
OLD_FILES+=usr/sbin/iptest
OLD_FILES+=usr/share/man/man1/ipnat.1.gz
OLD_FILES+=usr/share/man/man1/ipsend.1.gz
OLD_FILES+=usr/share/man/man1/iptest.1.gz
OLD_FILES+=usr/share/man/man5/ipsend.5.gz
# 200503XX: bsdtar takes over gtar
OLD_FILES+=usr/bin/gtar
OLD_FILES+=usr/share/man/man1/gtar.1.gz
# 200503XX
OLD_FILES+=usr/share/man/man3/exp10.3.gz
OLD_FILES+=usr/share/man/man3/exp10f.3.gz
OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz
# 20050324: updated release infrastructure
OLD_FILES+=usr/share/man/man5/drivers.conf.5.gz
# 20050317: removed from BIND 9 distribution
OLD_FILES+=usr/share/doc/bind9/KNOWN_DEFECTS
# 2005XXXX:
OLD_FILES+=sbin/mount_autofs
OLD_FILES+=usr/lib/libautofs.a
OLD_FILES+=usr/lib/libautofs.so
OLD_FILES+=usr/share/man/man8/mount_autofs.8.gz
# 20050203: Merged with fortunes
OLD_FILES+=usr/share/games/fortune/fortunes2
OLD_FILES+=usr/share/games/fortune/fortunes2.dat
# 200501XX:
OLD_FILES+=usr/libexec/getNAME
# 200411XX: gvinum replaces vinum
OLD_FILES+=bin/vinum
OLD_FILES+=rescue/vinum
OLD_FILES+=sbin/vinum
OLD_FILES+=usr/share/man/man8/vinum.8.gz
# 200411XX: libxpg4 removal
OLD_FILES+=usr/lib/libxpg4.a
OLD_FILES+=usr/lib/libxpg4.so
OLD_FILES+=usr/lib/libxpg4_p.a
# 20041109: replaced by em(4)
OLD_FILES+=usr/share/man/man4/gx.4.gz
OLD_FILES+=usr/share/man/man4/if_gx.4.gz
# 20041017: rune interface removed
OLD_FILES+=usr/include/rune.h
OLD_FILES+=usr/share/man/man3/fgetrune.3.gz
OLD_FILES+=usr/share/man/man3/fputrune.3.gz
OLD_FILES+=usr/share/man/man3/fungetrune.3.gz
OLD_FILES+=usr/share/man/man3/mbrrune.3.gz
OLD_FILES+=usr/share/man/man3/mbrune.3.gz
OLD_FILES+=usr/share/man/man3/rune.3.gz
OLD_FILES+=usr/share/man/man3/setinvalidrune.3.gz
OLD_FILES+=usr/share/man/man3/sgetrune.3.gz
OLD_FILES+=usr/share/man/man3/sputrune.3.gz
# 20040925: bind9 import
OLD_FILES+=usr/bin/dnskeygen
OLD_FILES+=usr/bin/dnsquery
OLD_FILES+=usr/lib/libisc.a
OLD_FILES+=usr/lib/libisc.so
OLD_FILES+=usr/lib/libisc_p.a
OLD_FILES+=usr/libexec/named-xfer
OLD_FILES+=usr/sbin/named.restart
OLD_FILES+=usr/sbin/ndc
OLD_FILES+=usr/sbin/nslookup
OLD_FILES+=usr/sbin/nsupdate
OLD_FILES+=usr/share/doc/bind/html/acl.html
OLD_FILES+=usr/share/doc/bind/html/address_list.html
OLD_FILES+=usr/share/doc/bind/html/comments.html
OLD_FILES+=usr/share/doc/bind/html/config.html
OLD_FILES+=usr/share/doc/bind/html/controls.html
OLD_FILES+=usr/share/doc/bind/html/docdef.html
OLD_FILES+=usr/share/doc/bind/html/example.html
OLD_FILES+=usr/share/doc/bind/html/include.html
OLD_FILES+=usr/share/doc/bind/html/index.html
OLD_FILES+=usr/share/doc/bind/html/key.html
OLD_FILES+=usr/share/doc/bind/html/logging.html
OLD_FILES+=usr/share/doc/bind/html/master.html
OLD_FILES+=usr/share/doc/bind/html/options.html
OLD_FILES+=usr/share/doc/bind/html/server.html
OLD_FILES+=usr/share/doc/bind/html/trusted-keys.html
OLD_FILES+=usr/share/doc/bind/html/zone.html
OLD_FILES+=usr/share/doc/bind/misc/DynamicUpdate
OLD_FILES+=usr/share/doc/bind/misc/FAQ.1of2
OLD_FILES+=usr/share/doc/bind/misc/FAQ.2of2
OLD_FILES+=usr/share/doc/bind/misc/rfc2317-notes.txt
OLD_FILES+=usr/share/doc/bind/misc/style.txt
OLD_FILES+=usr/share/man/man1/dnskeygen.1.gz
OLD_FILES+=usr/share/man/man1/dnsquery.1.gz
OLD_FILES+=usr/share/man/man8/named-bootconf.8.gz
OLD_FILES+=usr/share/man/man8/named-xfer.8.gz
OLD_FILES+=usr/share/man/man8/named.restart.8.gz
OLD_FILES+=usr/share/man/man8/ndc.8.gz
OLD_FILES+=usr/share/man/man8/nslookup.8.gz
# 200409XX
OLD_FILES+=usr/share/man/man3/ENSURE.3.gz
OLD_FILES+=usr/share/man/man3/ENSURE_ERR.3.gz
OLD_FILES+=usr/share/man/man3/INSIST.3.gz
OLD_FILES+=usr/share/man/man3/INSIST_ERR.3.gz
OLD_FILES+=usr/share/man/man3/INVARIANT.3.gz
OLD_FILES+=usr/share/man/man3/INVARIANT_ERR.3.gz
OLD_FILES+=usr/share/man/man3/REQUIRE.3.gz
OLD_FILES+=usr/share/man/man3/REQUIRE_ERR.3.gz
OLD_FILES+=usr/share/man/man3/assertion_type_to_text.3.gz
OLD_FILES+=usr/share/man/man3/assertions.3.gz
OLD_FILES+=usr/share/man/man3/bitncmp.3.gz
OLD_FILES+=usr/share/man/man3/evAddTime.3.gz
OLD_FILES+=usr/share/man/man3/evCancelConn.3.gz
OLD_FILES+=usr/share/man/man3/evCancelRW.3.gz
OLD_FILES+=usr/share/man/man3/evClearIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evClearTimer.3.gz
OLD_FILES+=usr/share/man/man3/evCmpTime.3.gz
OLD_FILES+=usr/share/man/man3/evConnFunc.3.gz
OLD_FILES+=usr/share/man/man3/evConnect.3.gz
OLD_FILES+=usr/share/man/man3/evConsIovec.3.gz
OLD_FILES+=usr/share/man/man3/evConsTime.3.gz
OLD_FILES+=usr/share/man/man3/evCreate.3.gz
OLD_FILES+=usr/share/man/man3/evDefer.3.gz
OLD_FILES+=usr/share/man/man3/evDeselectFD.3.gz
OLD_FILES+=usr/share/man/man3/evDestroy.3.gz
OLD_FILES+=usr/share/man/man3/evDispatch.3.gz
OLD_FILES+=usr/share/man/man3/evDo.3.gz
OLD_FILES+=usr/share/man/man3/evDrop.3.gz
OLD_FILES+=usr/share/man/man3/evFileFunc.3.gz
OLD_FILES+=usr/share/man/man3/evGetNext.3.gz
OLD_FILES+=usr/share/man/man3/evHold.3.gz
OLD_FILES+=usr/share/man/man3/evInitID.3.gz
OLD_FILES+=usr/share/man/man3/evLastEventTime.3.gz
OLD_FILES+=usr/share/man/man3/evListen.3.gz
OLD_FILES+=usr/share/man/man3/evMainLoop.3.gz
OLD_FILES+=usr/share/man/man3/evNowTime.3.gz
OLD_FILES+=usr/share/man/man3/evPrintf.3.gz
OLD_FILES+=usr/share/man/man3/evRead.3.gz
OLD_FILES+=usr/share/man/man3/evResetTimer.3.gz
OLD_FILES+=usr/share/man/man3/evSelectFD.3.gz
OLD_FILES+=usr/share/man/man3/evSetDebug.3.gz
OLD_FILES+=usr/share/man/man3/evSetIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evSetTimer.3.gz
OLD_FILES+=usr/share/man/man3/evStreamFunc.3.gz
OLD_FILES+=usr/share/man/man3/evSubTime.3.gz
OLD_FILES+=usr/share/man/man3/evTestID.3.gz
OLD_FILES+=usr/share/man/man3/evTimeRW.3.gz
OLD_FILES+=usr/share/man/man3/evTimeSpec.3.gz
OLD_FILES+=usr/share/man/man3/evTimeVal.3.gz
OLD_FILES+=usr/share/man/man3/evTimerFunc.3.gz
OLD_FILES+=usr/share/man/man3/evTouchIdleTimer.3.gz
OLD_FILES+=usr/share/man/man3/evTryAccept.3.gz
OLD_FILES+=usr/share/man/man3/evUnhold.3.gz
OLD_FILES+=usr/share/man/man3/evUntimeRW.3.gz
OLD_FILES+=usr/share/man/man3/evUnwait.3.gz
OLD_FILES+=usr/share/man/man3/evWaitFor.3.gz
OLD_FILES+=usr/share/man/man3/evWaitFunc.3.gz
OLD_FILES+=usr/share/man/man3/evWrite.3.gz
OLD_FILES+=usr/share/man/man3/eventlib.3.gz
OLD_FILES+=usr/share/man/man3/heap.3.gz
OLD_FILES+=usr/share/man/man3/heap_decreased.3.gz
OLD_FILES+=usr/share/man/man3/heap_delete.3.gz
OLD_FILES+=usr/share/man/man3/heap_element.3.gz
OLD_FILES+=usr/share/man/man3/heap_for_each.3.gz
OLD_FILES+=usr/share/man/man3/heap_free.3.gz
OLD_FILES+=usr/share/man/man3/heap_increased.3.gz
OLD_FILES+=usr/share/man/man3/heap_insert.3.gz
OLD_FILES+=usr/share/man/man3/heap_new.3.gz
OLD_FILES+=usr/share/man/man3/log_add_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_category_is_active.3.gz
OLD_FILES+=usr/share/man/man3/log_close_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_dec_references.3.gz
OLD_FILES+=usr/share/man/man3/log_free_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_free_context.3.gz
OLD_FILES+=usr/share/man/man3/log_get_filename.3.gz
OLD_FILES+=usr/share/man/man3/log_get_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_inc_references.3.gz
OLD_FILES+=usr/share/man/man3/log_new_context.3.gz
OLD_FILES+=usr/share/man/man3/log_new_file_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_new_null_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_new_syslog_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_open_stream.3.gz
OLD_FILES+=usr/share/man/man3/log_option.3.gz
OLD_FILES+=usr/share/man/man3/log_remove_channel.3.gz
OLD_FILES+=usr/share/man/man3/log_set_file_owner.3.gz
OLD_FILES+=usr/share/man/man3/log_vwrite.3.gz
OLD_FILES+=usr/share/man/man3/log_write.3.gz
OLD_FILES+=usr/share/man/man3/logging.3.gz
OLD_FILES+=usr/share/man/man3/memcluster.3.gz
OLD_FILES+=usr/share/man/man3/memget.3.gz
OLD_FILES+=usr/share/man/man3/memput.3.gz
OLD_FILES+=usr/share/man/man3/memstats.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/sigwait.3.gz
OLD_FILES+=usr/share/man/man3/tree_add.3.gz
OLD_FILES+=usr/share/man/man3/tree_delete.3.gz
OLD_FILES+=usr/share/man/man3/tree_init.3.gz
OLD_FILES+=usr/share/man/man3/tree_mung.3.gz
OLD_FILES+=usr/share/man/man3/tree_srch.3.gz
OLD_FILES+=usr/share/man/man3/tree_trav.3.gz
# 2004XXYY: OS-internal libs; no ports use them, so there is no need to use OLD_LIBS
OLD_FILES+=lib/geom/geom_concat.so.1
OLD_FILES+=lib/geom/geom_label.so.1
OLD_FILES+=lib/geom/geom_nop.so.1
OLD_FILES+=lib/geom/geom_stripe.so.1
# 20040728: GCC 3.4.2
OLD_DIRS+=usr/include/c++/3.3
OLD_FILES+=usr/include/c++/3.3/FlexLexer.h
OLD_FILES+=usr/include/c++/3.3/algorithm
OLD_FILES+=usr/include/c++/3.3/backward/algo.h
OLD_FILES+=usr/include/c++/3.3/backward/algobase.h
OLD_FILES+=usr/include/c++/3.3/backward/alloc.h
OLD_FILES+=usr/include/c++/3.3/backward/backward_warning.h
OLD_FILES+=usr/include/c++/3.3/backward/bvector.h
OLD_FILES+=usr/include/c++/3.3/backward/complex.h
OLD_FILES+=usr/include/c++/3.3/backward/defalloc.h
OLD_FILES+=usr/include/c++/3.3/backward/deque.h
OLD_FILES+=usr/include/c++/3.3/backward/fstream.h
OLD_FILES+=usr/include/c++/3.3/backward/function.h
OLD_FILES+=usr/include/c++/3.3/backward/hash_map.h
OLD_FILES+=usr/include/c++/3.3/backward/hash_set.h
OLD_FILES+=usr/include/c++/3.3/backward/hashtable.h
OLD_FILES+=usr/include/c++/3.3/backward/heap.h
OLD_FILES+=usr/include/c++/3.3/backward/iomanip.h
OLD_FILES+=usr/include/c++/3.3/backward/iostream.h
OLD_FILES+=usr/include/c++/3.3/backward/istream.h
OLD_FILES+=usr/include/c++/3.3/backward/iterator.h
OLD_FILES+=usr/include/c++/3.3/backward/list.h
OLD_FILES+=usr/include/c++/3.3/backward/map.h
OLD_FILES+=usr/include/c++/3.3/backward/multimap.h
OLD_FILES+=usr/include/c++/3.3/backward/multiset.h
OLD_FILES+=usr/include/c++/3.3/backward/new.h
OLD_FILES+=usr/include/c++/3.3/backward/ostream.h
OLD_FILES+=usr/include/c++/3.3/backward/pair.h
OLD_FILES+=usr/include/c++/3.3/backward/queue.h
OLD_FILES+=usr/include/c++/3.3/backward/rope.h
OLD_FILES+=usr/include/c++/3.3/backward/set.h
OLD_FILES+=usr/include/c++/3.3/backward/slist.h
OLD_FILES+=usr/include/c++/3.3/backward/stack.h
OLD_FILES+=usr/include/c++/3.3/backward/stream.h
OLD_FILES+=usr/include/c++/3.3/backward/streambuf.h
OLD_FILES+=usr/include/c++/3.3/backward/strstream
OLD_FILES+=usr/include/c++/3.3/backward/strstream.h
OLD_FILES+=usr/include/c++/3.3/backward/tempbuf.h
OLD_FILES+=usr/include/c++/3.3/backward/tree.h
OLD_FILES+=usr/include/c++/3.3/backward/vector.h
OLD_DIRS+=usr/include/c++/3.3/backward
OLD_FILES+=usr/include/c++/3.3/bits/atomicity.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_file.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/3.3/bits/basic_string.h
OLD_FILES+=usr/include/c++/3.3/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/3.3/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/3.3/bits/c++config.h
OLD_FILES+=usr/include/c++/3.3/bits/c++io.h
OLD_FILES+=usr/include/c++/3.3/bits/c++locale.h
OLD_FILES+=usr/include/c++/3.3/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/3.3/bits/char_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/cmath.tcc
OLD_FILES+=usr/include/c++/3.3/bits/codecvt.h
OLD_FILES+=usr/include/c++/3.3/bits/codecvt_specializations.h
OLD_FILES+=usr/include/c++/3.3/bits/concept_check.h
OLD_FILES+=usr/include/c++/3.3/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_base.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/3.3/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/3.3/bits/deque.tcc
OLD_FILES+=usr/include/c++/3.3/bits/fpos.h
OLD_FILES+=usr/include/c++/3.3/bits/fstream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/functexcept.h
OLD_FILES+=usr/include/c++/3.3/bits/generic_shadow.h
OLD_FILES+=usr/include/c++/3.3/bits/gslice.h
OLD_FILES+=usr/include/c++/3.3/bits/gslice_array.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-default.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr-single.h
OLD_FILES+=usr/include/c++/3.3/bits/gthr.h
OLD_FILES+=usr/include/c++/3.3/bits/indirect_array.h
OLD_FILES+=usr/include/c++/3.3/bits/ios_base.h
OLD_FILES+=usr/include/c++/3.3/bits/istream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/list.tcc
OLD_FILES+=usr/include/c++/3.3/bits/locale_classes.h
OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.h
OLD_FILES+=usr/include/c++/3.3/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/3.3/bits/localefwd.h
OLD_FILES+=usr/include/c++/3.3/bits/mask_array.h
OLD_FILES+=usr/include/c++/3.3/bits/messages_members.h
OLD_FILES+=usr/include/c++/3.3/bits/os_defines.h
OLD_FILES+=usr/include/c++/3.3/bits/ostream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/pthread_allocimpl.h
OLD_FILES+=usr/include/c++/3.3/bits/slice.h
OLD_FILES+=usr/include/c++/3.3/bits/slice_array.h
OLD_FILES+=usr/include/c++/3.3/bits/sstream.tcc
OLD_FILES+=usr/include/c++/3.3/bits/stl_algo.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_alloc.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_construct.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_deque.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_function.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_heap.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_list.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_map.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_pair.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_pthread_alloc.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_queue.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_relops.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_set.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_stack.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_threads.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_tree.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/3.3/bits/stl_vector.h
OLD_FILES+=usr/include/c++/3.3/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/3.3/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/3.3/bits/stringfwd.h
OLD_FILES+=usr/include/c++/3.3/bits/time_members.h
OLD_FILES+=usr/include/c++/3.3/bits/type_traits.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.h
OLD_FILES+=usr/include/c++/3.3/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/3.3/bits/valarray_meta.h
OLD_FILES+=usr/include/c++/3.3/bits/vector.tcc
OLD_DIRS+=usr/include/c++/3.3/bits
OLD_FILES+=usr/include/c++/3.3/bitset
OLD_FILES+=usr/include/c++/3.3/cassert
OLD_FILES+=usr/include/c++/3.3/cctype
OLD_FILES+=usr/include/c++/3.3/cerrno
OLD_FILES+=usr/include/c++/3.3/cfloat
OLD_FILES+=usr/include/c++/3.3/ciso646
OLD_FILES+=usr/include/c++/3.3/climits
OLD_FILES+=usr/include/c++/3.3/clocale
OLD_FILES+=usr/include/c++/3.3/cmath
OLD_FILES+=usr/include/c++/3.3/complex
OLD_FILES+=usr/include/c++/3.3/csetjmp
OLD_FILES+=usr/include/c++/3.3/csignal
OLD_FILES+=usr/include/c++/3.3/cstdarg
OLD_FILES+=usr/include/c++/3.3/cstddef
OLD_FILES+=usr/include/c++/3.3/cstdio
OLD_FILES+=usr/include/c++/3.3/cstdlib
OLD_FILES+=usr/include/c++/3.3/cstring
OLD_FILES+=usr/include/c++/3.3/ctime
OLD_FILES+=usr/include/c++/3.3/cwchar
OLD_FILES+=usr/include/c++/3.3/cwctype
OLD_FILES+=usr/include/c++/3.3/cxxabi.h
OLD_FILES+=usr/include/c++/3.3/deque
OLD_FILES+=usr/include/c++/3.3/exception
OLD_FILES+=usr/include/c++/3.3/exception_defines.h
OLD_FILES+=usr/include/c++/3.3/ext/algorithm
OLD_FILES+=usr/include/c++/3.3/ext/enc_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/functional
OLD_FILES+=usr/include/c++/3.3/ext/hash_map
OLD_FILES+=usr/include/c++/3.3/ext/hash_set
OLD_FILES+=usr/include/c++/3.3/ext/iterator
OLD_FILES+=usr/include/c++/3.3/ext/memory
OLD_FILES+=usr/include/c++/3.3/ext/numeric
OLD_FILES+=usr/include/c++/3.3/ext/rb_tree
OLD_FILES+=usr/include/c++/3.3/ext/rope
OLD_FILES+=usr/include/c++/3.3/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/3.3/ext/slist
OLD_FILES+=usr/include/c++/3.3/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hash_fun.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_hashtable.h
OLD_FILES+=usr/include/c++/3.3/ext/stl_rope.h
OLD_DIRS+=usr/include/c++/3.3/ext
OLD_FILES+=usr/include/c++/3.3/fstream
OLD_FILES+=usr/include/c++/3.3/functional
OLD_FILES+=usr/include/c++/3.3/iomanip
OLD_FILES+=usr/include/c++/3.3/ios
OLD_FILES+=usr/include/c++/3.3/iosfwd
OLD_FILES+=usr/include/c++/3.3/iostream
OLD_FILES+=usr/include/c++/3.3/istream
OLD_FILES+=usr/include/c++/3.3/iterator
OLD_FILES+=usr/include/c++/3.3/limits
OLD_FILES+=usr/include/c++/3.3/list
OLD_FILES+=usr/include/c++/3.3/locale
OLD_FILES+=usr/include/c++/3.3/map
OLD_FILES+=usr/include/c++/3.3/memory
OLD_FILES+=usr/include/c++/3.3/new
OLD_FILES+=usr/include/c++/3.3/numeric
OLD_FILES+=usr/include/c++/3.3/ostream
OLD_FILES+=usr/include/c++/3.3/queue
OLD_FILES+=usr/include/c++/3.3/set
OLD_FILES+=usr/include/c++/3.3/sstream
OLD_FILES+=usr/include/c++/3.3/stack
OLD_FILES+=usr/include/c++/3.3/stdexcept
OLD_FILES+=usr/include/c++/3.3/streambuf
OLD_FILES+=usr/include/c++/3.3/string
OLD_FILES+=usr/include/c++/3.3/typeinfo
OLD_FILES+=usr/include/c++/3.3/utility
OLD_FILES+=usr/include/c++/3.3/valarray
OLD_FILES+=usr/include/c++/3.3/vector
# 20040713: fla(4) removed.
OLD_FILES+=usr/share/man/man4/fla.4.gz
# 200407XX
OLD_FILES+=usr/sbin/kernbb
OLD_FILES+=usr/sbin/ntp-genkeys
OLD_FILES+=usr/sbin/ntptimeset
OLD_FILES+=usr/share/man/man8/kernbb.8.gz
OLD_FILES+=usr/share/man/man8/ntp-genkeys.8.gz
# 20040627: usbdevs.h and usbdevs_data.h removal
OLD_FILES+=usr/include/dev/usb/usbdevs.h
OLD_FILES+=usr/include/dev/usb/usbdevs_data.h
# 200406XX
OLD_FILES+=usr/bin/gasp
OLD_FILES+=usr/bin/gdbreplay
OLD_FILES+=usr/share/man/man1/gasp.1.gz
OLD_FILES+=sbin/mountd
OLD_FILES+=sbin/mount_fdesc
OLD_FILES+=sbin/mount_umap
OLD_FILES+=sbin/mount_union
OLD_FILES+=sbin/mount_msdos
OLD_FILES+=sbin/mount_null
OLD_FILES+=sbin/mount_kernfs
# 200405XX: arl
OLD_FILES+=usr/sbin/arlconfig
OLD_FILES+=usr/share/man/man8/arlconfig.8.gz
# 200403XX
OLD_FILES+=bin/raidctl
OLD_FILES+=sbin/raidctl
OLD_FILES+=usr/bin/sasc
OLD_FILES+=usr/sbin/sgsc
OLD_FILES+=usr/sbin/stlload
OLD_FILES+=usr/sbin/stlstats
OLD_FILES+=usr/share/man/man1/sasc.1.gz
OLD_FILES+=usr/share/man/man1/sgsc.1.gz
OLD_FILES+=usr/share/man/man4/i386/stl.4.gz
OLD_FILES+=usr/share/man/man8/raidctl.8.gz
# 20040229: clean_environment() was removed after 3 days
OLD_FILES+=usr/share/man/man3/clean_environment.3.gz
# 20040119: installed as `isdntel' in newer systems
OLD_FILES+=etc/isdn/isdntel.sh
# 200XYYZZ: /lib transition glitches
OLD_FILES+=lib/libalias.so
OLD_FILES+=lib/libatm.so
OLD_FILES+=lib/libbsdxml.so
OLD_FILES+=lib/libc.so
OLD_FILES+=lib/libcam.so
OLD_FILES+=lib/libcrypt.so
OLD_FILES+=lib/libcrypto.so
OLD_FILES+=lib/libdevstat.so
OLD_FILES+=lib/libedit.so
OLD_FILES+=lib/libgeom.so
OLD_FILES+=lib/libipsec.so
OLD_FILES+=lib/libipx.so
OLD_FILES+=lib/libkvm.so
OLD_FILES+=lib/libm.so
OLD_FILES+=lib/libmd.so
OLD_FILES+=lib/libncurses.so
OLD_FILES+=lib/libreadline.so
OLD_FILES+=lib/libsbuf.so
OLD_FILES+=lib/libufs.so
OLD_FILES+=lib/libz.so
# 200312XX
OLD_FILES+=bin/cxconfig
OLD_FILES+=sbin/cxconfig
OLD_FILES+=usr/share/man/man8/cxconfig.8.gz
# 20031016: MULTI_DRIVER_MODULE macro removed
OLD_FILES+=usr/share/man/man9/MULTI_DRIVER_MODULE.9.gz
# 200309XX
OLD_FILES+=usr/bin/symorder
OLD_FILES+=usr/share/man/man1/symorder.1.gz
# 200308XX
OLD_FILES+=usr/sbin/amldb
OLD_FILES+=usr/share/man/man8/amldb.8.gz
# 200307XX
OLD_FILES+=sbin/mount_nwfs
OLD_FILES+=sbin/mount_portalfs
OLD_FILES+=sbin/mount_smbfs
# 200306XX
OLD_FILES+=usr/sbin/dev_mkdb
OLD_FILES+=usr/share/man/man8/dev_mkdb.8.gz
# 200304XX
OLD_FILES+=usr/lib/libcipher.a
OLD_FILES+=usr/lib/libcipher.so
OLD_FILES+=usr/lib/libcipher_p.a
OLD_FILES+=usr/lib/libgmp.a
OLD_FILES+=usr/lib/libgmp.so
OLD_FILES+=usr/lib/libgmp_p.a
OLD_FILES+=usr/lib/libperl.a
OLD_FILES+=usr/lib/libperl.so
OLD_FILES+=usr/lib/libperl_p.a
OLD_FILES+=usr/lib/libposix1e.a
OLD_FILES+=usr/lib/libposix1e.so
OLD_FILES+=usr/lib/libposix1e_p.a
OLD_FILES+=usr/lib/libskey.a
OLD_FILES+=usr/lib/libskey.so
OLD_FILES+=usr/lib/libskey_p.a
OLD_FILES+=usr/libexec/tradcpp0
OLD_FILES+=usr/libexec/cpp0
# 200304XX: removal of xten
OLD_FILES+=usr/libexec/xtend
OLD_FILES+=usr/sbin/xten
OLD_FILES+=usr/share/man/man1/xten.1.gz
OLD_FILES+=usr/share/man/man8/xtend.8.gz
# 200303XX
OLD_FILES+=usr/lib/libacl.so
OLD_FILES+=usr/lib/libdescrypt.so
OLD_FILES+=usr/lib/libf2c.so
OLD_FILES+=usr/lib/libg++.so
OLD_FILES+=usr/lib/libkdb.so
OLD_FILES+=usr/lib/librsaINTL.so
OLD_FILES+=usr/lib/libscrypt.so
OLD_FILES+=usr/lib/libss.so
# 200302XX
OLD_FILES+=usr/lib/libacl.a
OLD_FILES+=usr/lib/libacl_p.a
OLD_FILES+=usr/lib/libkadm.a
OLD_FILES+=usr/lib/libkadm.so
OLD_FILES+=usr/lib/libkadm_p.a
OLD_FILES+=usr/lib/libkafs.a
OLD_FILES+=usr/lib/libkafs.so
OLD_FILES+=usr/lib/libkafs_p.a
OLD_FILES+=usr/lib/libkdb.a
OLD_FILES+=usr/lib/libkdb_p.a
OLD_FILES+=usr/lib/libkrb.a
OLD_FILES+=usr/lib/libkrb.so
OLD_FILES+=usr/lib/libkrb_p.a
OLD_FILES+=usr/share/man/man3/SSL_CIPHER_get_name.3.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_extra_chain_cert.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_add_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_ctrl.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_flush_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_verify_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_load_verify_locations.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_number.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_cache_size.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sess_set_get_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_sessions.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_store.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cert_verify_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_cipher_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_client_cert_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_default_passwd_cb.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_generate_session_id.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_info_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_max_cert_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_msg_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_options.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_quiet_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_cache_mode.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_session_id_context.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_ssl_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_dh_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_tmp_rsa_callback.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_set_verify.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_use_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/SSL_SESSION_get_time.3.gz
OLD_FILES+=usr/share/man/man3/SSL_accept.3.gz
OLD_FILES+=usr/share/man/man3/SSL_alert_type_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_clear.3.gz
OLD_FILES+=usr/share/man/man3/SSL_connect.3.gz
OLD_FILES+=usr/share/man/man3/SSL_do_handshake.3.gz
OLD_FILES+=usr/share/man/man3/SSL_free.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_SSL_CTX.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ciphers.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_client_CA_list.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_current_cipher.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_default_timeout.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_error.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ex_data_X509_STORE_CTX_idx.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_cert_chain.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_peer_certificate.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_rbio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_get_version.3.gz
OLD_FILES+=usr/share/man/man3/SSL_library_init.3.gz
OLD_FILES+=usr/share/man/man3/SSL_load_client_CA_file.3.gz
OLD_FILES+=usr/share/man/man3/SSL_new.3.gz
OLD_FILES+=usr/share/man/man3/SSL_pending.3.gz
OLD_FILES+=usr/share/man/man3/SSL_read.3.gz
OLD_FILES+=usr/share/man/man3/SSL_rstate_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_session_reused.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_bio.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_connect_state.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_fd.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_session.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_set_verify_result.3.gz
OLD_FILES+=usr/share/man/man3/SSL_shutdown.3.gz
OLD_FILES+=usr/share/man/man3/SSL_state_string.3.gz
OLD_FILES+=usr/share/man/man3/SSL_want.3.gz
OLD_FILES+=usr/share/man/man3/SSL_write.3.gz
OLD_FILES+=usr/share/man/man3/d2i_SSL_SESSION.3.gz
# 200301XX
OLD_FILES+=usr/share/man/man3/des_3cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_3ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_cbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_cfb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_ecb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_read.3.gz
OLD_FILES+=usr/share/man/man3/des_enc_write.3.gz
OLD_FILES+=usr/share/man/man3/des_is_weak_key.3.gz
OLD_FILES+=usr/share/man/man3/des_key_sched.3.gz
OLD_FILES+=usr/share/man/man3/des_ofb_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_pcbc_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/des_quad_cksum.3.gz
OLD_FILES+=usr/share/man/man3/des_random_key.3.gz
OLD_FILES+=usr/share/man/man3/des_read_2password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_password.3.gz
OLD_FILES+=usr/share/man/man3/des_read_pw_string.3.gz
OLD_FILES+=usr/share/man/man3/des_set_key.3.gz
OLD_FILES+=usr/share/man/man3/des_set_odd_parity.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_2key.3.gz
OLD_FILES+=usr/share/man/man3/des_string_to_key.3.gz
# 200212XX
OLD_FILES+=usr/sbin/kenv
OLD_FILES+=usr/bin/kenv
OLD_FILES+=usr/sbin/elf2aout
# 200210XX
OLD_FILES+=usr/include/libusbhid.h
OLD_FILES+=usr/share/man/man3/All_FreeBSD.3.gz
OLD_FILES+=usr/share/man/man3/CheckRules.3.gz
OLD_FILES+=usr/share/man/man3/ChunkCanBeRoot.3.gz
OLD_FILES+=usr/share/man/man3/Clone_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Collapse_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Create_Chunk_DWIM.3.gz
OLD_FILES+=usr/share/man/man3/Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Debug_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Delete_Chunk.3.gz
OLD_FILES+=usr/share/man/man3/Disk_Names.3.gz
OLD_FILES+=usr/share/man/man3/Free_Disk.3.gz
OLD_FILES+=usr/share/man/man3/MakeDev.3.gz
OLD_FILES+=usr/share/man/man3/MakeDevDisk.3.gz
OLD_FILES+=usr/share/man/man3/Next_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Next_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Open_Disk.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Cyl_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Prev_Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Set_Bios_Geom.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Blocks.3.gz
OLD_FILES+=usr/share/man/man3/Set_Boot_Mgr.3.gz
OLD_FILES+=usr/share/man/man3/ShowChunkFlags.3.gz
OLD_FILES+=usr/share/man/man3/Track_Aligned.3.gz
OLD_FILES+=usr/share/man/man3/Write_Disk.3.gz
OLD_FILES+=usr/share/man/man3/slice_type_name.3.gz
# 200210XX: most games moved to ports
OLD_FILES+=usr/share/man/man6/adventure.6.gz
OLD_FILES+=usr/share/man/man6/arithmetic.6.gz
OLD_FILES+=usr/share/man/man6/atc.6.gz
OLD_FILES+=usr/share/man/man6/backgammon.6.gz
OLD_FILES+=usr/share/man/man6/battlestar.6.gz
OLD_FILES+=usr/share/man/man6/bs.6.gz
OLD_FILES+=usr/share/man/man6/canfield.6.gz
OLD_FILES+=usr/share/man/man6/cfscores.6.gz
OLD_FILES+=usr/share/man/man6/cribbage.6.gz
OLD_FILES+=usr/share/man/man6/fish.6.gz
OLD_FILES+=usr/share/man/man6/hack.6.gz
OLD_FILES+=usr/share/man/man6/hangman.6.gz
OLD_FILES+=usr/share/man/man6/larn.6.gz
OLD_FILES+=usr/share/man/man6/mille.6.gz
OLD_FILES+=usr/share/man/man6/phantasia.6.gz
OLD_FILES+=usr/share/man/man6/piano.6.gz
OLD_FILES+=usr/share/man/man6/pig.6.gz
OLD_FILES+=usr/share/man/man6/quiz.6.gz
OLD_FILES+=usr/share/man/man6/rain.6.gz
OLD_FILES+=usr/share/man/man6/robots.6.gz
OLD_FILES+=usr/share/man/man6/rogue.6.gz
OLD_FILES+=usr/share/man/man6/sail.6.gz
OLD_FILES+=usr/share/man/man6/snake.6.gz
OLD_FILES+=usr/share/man/man6/snscore.6.gz
OLD_FILES+=usr/share/man/man6/trek.6.gz
OLD_FILES+=usr/share/man/man6/wargames.6.gz
OLD_FILES+=usr/share/man/man6/worm.6.gz
OLD_FILES+=usr/share/man/man6/worms.6.gz
OLD_FILES+=usr/share/man/man6/wump.6.gz
# 200207XX
OLD_FILES+=usr/share/man/man1aout/ar.1aout.gz
OLD_FILES+=usr/share/man/man1aout/as.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ld.1aout.gz
OLD_FILES+=usr/share/man/man1aout/nm.1aout.gz
OLD_FILES+=usr/share/man/man1aout/ranlib.1aout.gz
OLD_FILES+=usr/share/man/man1aout/size.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strings.1aout.gz
OLD_FILES+=usr/share/man/man1aout/strip.1aout.gz
OLD_FILES+=bin/mountd
OLD_FILES+=bin/nfsd
# 20020707 sbin/nfsd -> usr.sbin/nfsd
OLD_FILES+=sbin/nfsd
# 200206XX
OLD_FILES+=usr/lib/libpam_ssh.a
OLD_FILES+=usr/lib/libpam_ssh_p.a
OLD_FILES+=usr/bin/help
OLD_FILES+=usr/bin/sccs
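# usr/bin/gdbserver is apparently still built on amd64, arm, i386 and
# powerpc, so only mark it obsolete on the other targets.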
.if ${TARGET_ARCH} != "amd64" && ${TARGET} != "arm" && ${TARGET_ARCH} != "i386" && ${TARGET} != "powerpc"
OLD_FILES+=usr/bin/gdbserver
.endif
OLD_FILES+=usr/bin/ssh-keysign
OLD_FILES+=usr/sbin/gifconfig
OLD_FILES+=usr/sbin/prefix
# 200205XX
OLD_FILES+=usr/bin/doscmd
# 200204XX
OLD_FILES+=usr/bin/a2p
OLD_FILES+=usr/bin/ptx
OLD_FILES+=usr/bin/pod2text
OLD_FILES+=usr/bin/pod2man
OLD_FILES+=usr/bin/pod2latex
OLD_FILES+=usr/bin/pod2html
OLD_FILES+=usr/bin/h2ph
OLD_FILES+=usr/bin/dprofpp
OLD_FILES+=usr/bin/c2ph
OLD_FILES+=usr/bin/h2xs
OLD_FILES+=usr/bin/pl2pm
OLD_FILES+=usr/bin/splain
OLD_FILES+=usr/bin/s2p
OLD_FILES+=usr/bin/find2perl
OLD_FILES+=usr/sbin/pkg_update
OLD_FILES+=usr/sbin/scriptdump
# 20020409 GC kget(1), userconfig is long dead.
OLD_FILES+=sbin/kget
OLD_FILES+=usr/share/man/man8/kget.8.gz
# 200203XX
OLD_FILES+=usr/lib/libss.a
OLD_FILES+=usr/lib/libss_p.a
OLD_FILES+=usr/lib/libtelnet.a
OLD_FILES+=usr/lib/libtelnet_p.a
OLD_FILES+=usr/sbin/diskpart
# 200202XX
OLD_FILES+=usr/bin/gprof4
# 200201XX
OLD_FILES+=usr/sbin/linux
# 2001XXXX
OLD_FILES+=usr/bin/joy
OLD_FILES+=usr/sbin/ibcs2
OLD_FILES+=usr/sbin/svr4
OLD_FILES+=usr/bin/chflags
OLD_FILES+=usr/sbin/uuconv
OLD_FILES+=usr/sbin/uuchk
OLD_FILES+=usr/sbin/portmap
OLD_FILES+=usr/sbin/pmap_set
OLD_FILES+=usr/sbin/pmap_dump
OLD_FILES+=usr/sbin/mcon
OLD_FILES+=usr/sbin/stlstty
OLD_FILES+=usr/sbin/ispppcontrol
OLD_FILES+=usr/sbin/rndcontrol
# 20011001: UUCP migration to ports
OLD_FILES+=usr/bin/uucp
OLD_FILES+=usr/bin/uulog
OLD_FILES+=usr/bin/uuname
OLD_FILES+=usr/bin/uupick
OLD_FILES+=usr/bin/uusched
OLD_FILES+=usr/bin/uustat
OLD_FILES+=usr/bin/uuto
OLD_FILES+=usr/bin/uux
OLD_FILES+=usr/libexec/uucp/uucico
OLD_FILES+=usr/libexec/uucp/uuxqt
OLD_FILES+=usr/libexec/uucpd
OLD_FILES+=usr/share/man/man1/uuconv.1.gz
OLD_FILES+=usr/share/man/man1/uucp.1.gz
OLD_FILES+=usr/share/man/man1/uulog.1.gz
OLD_FILES+=usr/share/man/man1/uuname.1.gz
OLD_FILES+=usr/share/man/man1/uupick.1.gz
OLD_FILES+=usr/share/man/man1/uustat.1.gz
OLD_FILES+=usr/share/man/man1/uuto.1.gz
OLD_FILES+=usr/share/man/man1/uux.1.gz
OLD_FILES+=usr/share/man/man8/uuchk.8.gz
OLD_FILES+=usr/share/man/man8/uucico.8.gz
OLD_FILES+=usr/share/man/man8/uucpd.8.gz
OLD_FILES+=usr/share/man/man8/uusched.8.gz
OLD_FILES+=usr/share/man/man8/uuxqt.8.gz
# 20010523 mount_portal -> mount_portalfs
OLD_FILES+=sbin/mount_portal
OLD_FILES+=usr/share/man/man8/mount_portal.8.gz
# 200104XX
OLD_FILES+=usr/lib/libdescrypt.a
OLD_FILES+=usr/lib/libscrypt.a
OLD_FILES+=usr/lib/libscrypt_p.a
OLD_FILES+=usr/sbin/pim6stat
OLD_FILES+=usr/sbin/pim6sd
OLD_FILES+=usr/sbin/pim6dd
# 20010217
OLD_FILES+=usr/share/doc/bind/misc/dns-setup
# 20001200
OLD_FILES+=usr/lib/libgcc_r_pic.a
# 200009XX
OLD_FILES+=usr/lib/libRSAglue.a
OLD_FILES+=usr/lib/libRSAglue.so
OLD_FILES+=usr/lib/librsaINTL.a
OLD_FILES+=usr/lib/librsaUSA.a
OLD_FILES+=usr/lib/librsaUSA.so
# 200002XX ?
OLD_FILES+=usr/lib/libf2c.a
OLD_FILES+=usr/lib/libf2c_p.a
OLD_FILES+=usr/lib/libg++.a
OLD_FILES+=usr/lib/libg++_p.a
# 20001006
OLD_FILES+=usr/bin/miniperl
# 20000810
OLD_FILES+=usr/bin/sperl
# 200001XX
OLD_FILES+=usr/sbin/apmconf
# 199911XX
OLD_FILES+=usr/sbin/ipfstat
OLD_FILES+=usr/sbin/ipmon
OLD_FILES+=usr/sbin/ipnat
OLD_FILES+=usr/sbin/bad144
OLD_FILES+=usr/sbin/wormcontrol
OLD_FILES+=usr/sbin/named-bootconf
OLD_FILES+=usr/sbin/kvm_mkdb
OLD_FILES+=usr/sbin/keyadmin
# 199909XX
OLD_FILES+=usr/lib/libdescrypt_p.a
OLD_FILES+=sbin/ft
# 199903XX
OLD_FILES+=sbin/modload
OLD_FILES+=sbin/modunload
OLD_FILES+=usr/sbin/natd
# 199812XX
OLD_FILES+=sbin/dset
# 199809XX
OLD_FILES+=sbin/scsi
OLD_FILES+=sbin/scsiformat
OLD_FILES+=usr/sbin/ncrcontrol
OLD_FILES+=usr/sbin/tickadj
# 199806XX
OLD_FILES+=usr/sbin/mkdosfs
# 199801XX
OLD_FILES+=sbin/mount_lfs
OLD_FILES+=sbin/newlfs
OLD_FILES+=sbin/dumplfs
OLD_FILES+=usr/sbin/qcamcontrol
OLD_FILES+=usr/sbin/supscan
# 1997XXXX
OLD_FILES+=usr/sbin/sysctl
OLD_FILES+=usr/sbin/ctm_scan
OLD_FILES+=usr/sbin/addgroup
OLD_FILES+=usr/sbin/rmgroup
# 1996XXXX
OLD_FILES+=sbin/rdisc
OLD_FILES+=usr/sbin/cdplay
OLD_FILES+=usr/sbin/supfilesrv
OLD_FILES+=usr/sbin/routed
OLD_FILES+=usr/sbin/lsdev
OLD_FILES+=usr/sbin/yppasswdd
## unsorted
# do we still support aout builds?
#OLD_FILES+=usr/lib/aout/c++rt0.o
#OLD_FILES+=usr/lib/aout/crt0.o
#OLD_FILES+=usr/lib/aout/gcrt0.o
#OLD_FILES+=usr/lib/aout/scrt0.o
#OLD_FILES+=usr/lib/aout/sgcrt0.o
OLD_FILES+=usr/bin/sperl5
OLD_FILES+=usr/bin/perl5.6.0
OLD_FILES+=usr/bin/sperl5.6.0
OLD_FILES+=usr/bin/perlbc
OLD_FILES+=usr/bin/perl5.00503
OLD_FILES+=usr/bin/sperl5.00503
OLD_FILES+=usr/bin/perlbug
OLD_FILES+=usr/bin/perlcc
OLD_FILES+=usr/bin/perldoc
OLD_FILES+=usr/bin/suidperl
OLD_FILES+=usr/lib/pam_ftp.so
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Apache.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Cookie.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Fast.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Push.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI/Switch.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/FirstTime.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN/Nox.pm
OLD_FILES+=usr/libdata/perl/5.00503/Class/Struct.pm
OLD_FILES+=usr/libdata/perl/5.00503/Devel/SelfStubber.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Command.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Embed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Install.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Installed.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Liblist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MM_Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/MakeMaker.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Manifest.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mkbootstrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Mksymlists.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/Packlist.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/inst
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/testlib.pm
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/typemap
OLD_FILES+=usr/libdata/perl/5.00503/ExtUtils/xsubpp
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Mac.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/OS2.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Unix.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/VMS.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec/Win32.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Basename.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/CheckTree.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Compare.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Copy.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/DosGlob.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Find.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Path.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/Spec.pm
OLD_FILES+=usr/libdata/perl/5.00503/File/stat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Long.pm
OLD_FILES+=usr/libdata/perl/5.00503/Getopt/Std.pm
OLD_FILES+=usr/libdata/perl/5.00503/I18N/Collate.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open2.pm
OLD_FILES+=usr/libdata/perl/5.00503/IPC/Open3.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigFloat.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/BigInt.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Complex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Math/Trig.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/Ping.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/hostent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/netent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/protoent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Net/servent.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Functions.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Html.pm
OLD_FILES+=usr/libdata/perl/5.00503/Pod/Text.pm
OLD_FILES+=usr/libdata/perl/5.00503/Search/Dict.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Hostname.pm
OLD_FILES+=usr/libdata/perl/5.00503/Sys/Syslog.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Cap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/Complete.pm
OLD_FILES+=usr/libdata/perl/5.00503/Term/ReadLine.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test/Harness.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Abbrev.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/ParseWords.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Soundex.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Tabs.pm
OLD_FILES+=usr/libdata/perl/5.00503/Text/Wrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Array.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Hash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/RefHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/Scalar.pm
OLD_FILES+=usr/libdata/perl/5.00503/Tie/SubstrHash.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/Local.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/gmtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/localtime.pm
OLD_FILES+=usr/libdata/perl/5.00503/Time/tm.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/grent.pm
OLD_FILES+=usr/libdata/perl/5.00503/User/pwent.pm
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/GetOptions.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/FindOption.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Configure.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/config.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/Croak.al
OLD_FILES+=usr/libdata/perl/5.00503/auto/Getopt/Long/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Deparse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/CC.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Debug.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Showlex.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/makeliblinks
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bblock.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/cc_harness
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Bytecode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Stackobj.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Xref.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Lint.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Asmdata.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Assembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Disassembler.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/disassemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/assemble
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/Terse.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B/C.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/EXTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/INTERN.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/XSlock.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/av.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/bytecode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/byterun.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cc_runtime.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/config.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cop.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/cv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/dosish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embed.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/embedvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/fakethr.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/form.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/gv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/handy.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/hv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/intrpvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/iperlsys.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/keywords.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/mg.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/nostdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objXSUB.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/objpp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/op.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/opcode.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/patchlevel.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perl.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsdio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlsfio.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perlvars.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/perly.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/pp_proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/proto.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regcomp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regexp.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/regnodes.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/scope.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/sv.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thrdvar.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/thread.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/unixish.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/CORE/util.h
OLD_FILES+=usr/libdata/perl/5.00503/mach/Data/Dumper.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Select.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Handle.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Seekable.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO/Pipe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/SysV.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Msg.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IPC/Semaphore.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/B.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/B/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/DB_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DB_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/Dumper.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Data/Dumper/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/.exists
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_findfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_expandspec.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/dl_find_symbol_anywhere.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/DynaLoader.a
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/DynaLoader/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/Fcntl.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Fcntl/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/IO.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IO/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/SysV.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/IPC/SysV/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/NDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/NDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/Opcode.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Opcode/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/assert.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tolower.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/toupper.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/closedir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/opendir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/readdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewinddir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/errno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/creat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fcntl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgrnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atan2.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/cos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fabs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/log.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/pow.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sqrt.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwnam.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpwuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/longjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/kill.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/feof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/siglongjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sigsetjmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/raise.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/offsetof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/clearerr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fclose.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fdopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fileno.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fputs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fread.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/freopen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fseek.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ferror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fflush.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fgetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fsetpos.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ftell.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fwrite.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gets.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/perror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/printf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/putchar.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/puts.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/remove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rename.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rewind.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/scanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sscanf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/tmpfile.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ungetc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vfprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/vsprintf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/abs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atexit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atof.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atoi.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/atol.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/bsearch.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/calloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/div.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/exit.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/free.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getenv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/labs.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/ldiv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/malloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/qsort.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/realloc.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/srand.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/system.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memmove.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/memset.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strcspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strerror.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strlen.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncmp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strncpy.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strpbrk.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strrchr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strspn.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strstr.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/strtok.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chmod.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fstat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/mkdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/stat.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/umask.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/wait.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/waitpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/gmtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/localtime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/time.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/alarm.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/chown.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execl.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execle.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execlp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execv.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execve.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/execvp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/fork.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getcwd.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getegid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/geteuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getgroups.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getlogin.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpgrp.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getpid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getppid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/getuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/isatty.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/link.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/rmdir.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setgid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setuid.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/setvbuf.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/sleep.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/unlink.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/utime.al
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/autosplit.ix
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/POSIX.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/POSIX/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/SDBM_File.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/SDBM_File/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/Socket.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Socket/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/attrs.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/attrs/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.so
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/re.bs
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/re/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/sdbm/extralibs.ld
OLD_FILES+=usr/libdata/perl/5.00503/mach/auto/Errno/.packlist
OLD_FILES+=usr/libdata/perl/5.00503/mach/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/B.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/O.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/perllocal.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/DB_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Errno.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Fcntl.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/IO.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/NDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Safe.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Opcode.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/ops.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pod
OLD_FILES+=usr/libdata/perl/5.00503/mach/POSIX.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/SDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/Socket.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/attrs.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/mach/_h2ph_pre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/a.out.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_ccb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_extend.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_periph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/cam/cam_xpt_sim.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/alias.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/assert.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bitstring.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/calendar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/camlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/com_right.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/curses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/devstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dialog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/disktab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/dlfcn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/eti.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fetch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fnmatch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/form.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fstab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ftpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/fts.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/glob.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/gnuregex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/grp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/histedit.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ifaddrs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/iso646.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/kvm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libatm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libdisk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libgen.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libutil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/link.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/locale.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/login_cap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/FlexLexer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/PlotFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/SFile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/_G_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/builtinbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/defalloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/editbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/floatio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/fstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/indstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iolibio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iomanip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iostreamP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/istream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/libioP.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/new.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/parsestream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pfstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/procbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/pthread_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/ropeimpl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stdiostream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_algobase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_alloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_bvector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_config.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_construct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_deque.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_function.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_fun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hash_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_hashtable.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_iterator.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multimap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_multiset.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_numeric.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_pair.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_raw_storage_iter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_relops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_rope.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_slist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_uninitialized.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stl_vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/stream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/streambuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/strstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/tempbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/type_traits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g++/vector.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/math.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/memory.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/menu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mpool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ncurses.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ndbm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netdb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nl_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objformat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/opie.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/osreldate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/panel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/paths.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap-namedb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pthread_np.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pwd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ranlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/regexp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/resolv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rune.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/runetype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/search.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sgtty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/skey.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stab.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stddef.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/string.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stringlist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/strings.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/struct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sysexits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/taclib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/tcpd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/term.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termcap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ttyent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unctrl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/utmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/values.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vgl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/ftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/inet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/nameser_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/arpa/tftp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/assertions.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/ctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/dst.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/eventlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/heap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/irpmarshall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/logging.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/memcluster.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/isc/tree.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ansi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_bios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/apm_segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asc_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asmacros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/asnames.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/atomic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bootinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_at386.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_memio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pc98.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_pio_ind.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cdk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/clock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/comstats.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/console.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpu.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cpufunc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cputypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/cronyx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/db_machdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/dvcfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/float.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/floatingpoint.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/frame.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globaldata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/globals.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/gsc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_cause.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_rbch_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_tel_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_trace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ieeefp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wavelan_ieee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/if_wl_wavelan.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/iic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/in_cksum.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_bt848.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_ctx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_fd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ioctl_meteor.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/limits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/md_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mouse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mpapic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/mtpr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/bus_dma.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/npx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcaudioio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcb_ext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pcvt_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/perfmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/physio_proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/profile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/psl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/reloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/segments.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/setjmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sigframe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/smptests.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/speaker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/specialreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/spigot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/stdarg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/sysarch.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/trap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/tss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/uc_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/ultrasound.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/varargs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vm86.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/vmparam.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/wtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/i4b_isppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/machine/pci_cfgreg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bootsect.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/bpb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/denode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/direntry.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/fat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/msdosfs/msdosfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpf_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bpfdesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ethernet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_dl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ieee80211.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_llc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_media.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_mib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_pppvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_slvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_sppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_stf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tapvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tun.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_tunvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_vlan_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/intrq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/iso88025.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/net_osdep.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/netisr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/pfkeyv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/ppp_defs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/radix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/raw_cb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/route.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/slcompress.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/zlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_faith.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_arc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/net/if_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/krpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsdiskless.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsm_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrtt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsrvcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nfsv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/nqnfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/rpcv2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nfs/xdr_subs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/aarp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/at_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/ddp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatalk/phase2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_cm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sigmgr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_sys.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/atm_vc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/kern_include.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netatm/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/netgraph.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_UI.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_async.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bpf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_bridge.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_cisco.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_echo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_frame_relay.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_hole.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_iface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ksocket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_lmi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_message.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_mppc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_one2many.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_parse.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_ppp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pppoe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_pptpgre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_rfc1490.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_sample.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tee.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_vjc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_eiface.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_etf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_device.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_l2tp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netgraph/ng_fec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/icmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_atm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_ether.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/if_fddi.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/igmp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_hostcache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/in_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_dummynet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_encap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fil.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_flow.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_frag.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_icmp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_nat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_proxy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_state.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ipprotosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_fsm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_seq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/tcpip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/udp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_fw2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet/ip_gre.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ah6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/icmp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_gif.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_ifattach.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_prefix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/in6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_ecn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_fw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_mroute.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ip6protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipcomp6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/mld6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/nd6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/pim6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/scope6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/tcp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/udp6_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/esp_rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netinet6/raw_ip6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_ip.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/ipx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipx/spx_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netkey/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netnatm/natm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_cfg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_lib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_ncp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_nls.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rcfile.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_sock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/ncp_user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netncp/nwerror.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/idp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_error.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_if.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/ns_pcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/sp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spidp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_timer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netns/spp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_compr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_ihash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_inode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfs_vfsops.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ntfs/ntfsmount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_node.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/nwfs/nwfs_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/NXConstStr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/Protocol.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/encoding.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc-list.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/objc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/runtime.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/sarray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/thr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/objc/typedstream.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/blowfish.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/bn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/buffer.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/comp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/conf_api.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/crypto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dh.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/dso.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/e_os2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ebcdic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/evp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hmac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/lhash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/mdc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/obj_mac.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/objects.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/opensslv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pem2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs12.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/pkcs7.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rand.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rc5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ripemd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/rsa.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/safestack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/sha.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl2.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl23.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ssl_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/stack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/symhacks.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tls1.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/tmdiff.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/txt_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509_vfy.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/x509v3.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/idea.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/asn1t.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/cryptlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/des_old.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/engine.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/krb5_asn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/kssl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ocsp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ossl_typ.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/aes_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/eng_int.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_4758_cca_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_aep_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_atalla_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_cswift_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ncipher_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_nuron_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_sureware_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/hw_ubsec_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/openssl/ui_locl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cardinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/cis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/driver.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/i82365.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pccard_nbk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/slot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/meciareg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcic_pci.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/pccard/pcicvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/mqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/posix4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/sched.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/posix4/semaphore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/dumprestore.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/routed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/rwhod.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/talkd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/protocols/timed.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/chardefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/history.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/keymaps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/readline.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlconf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/rlstdc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readline/tilde.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/auth_unix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/des_crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_clnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/pmap_rmt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_com.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/rpc_msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/svc_auth.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpc/xdr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/bootparam_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/crypt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/key_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/klm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nfs_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_cache.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_callback.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_db.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nis_tags.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nislib.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/nlm_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rnusers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rquota.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/rwall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/sm_inter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/spray.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yp_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypclnt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/yppasswd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypupdate_prot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/rpcsvc/ypxfrd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_macros.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/_pam_types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_appl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_mod_misc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/security/pam_modules.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/mit-sipb-copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/ss/ss_err.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/_posix.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acct.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/acl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/agpio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/aio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/assym.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ata.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/blist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/buf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/bus_private.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/callout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ccdvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdefs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cdrio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/chio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/clist.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/endian.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/conf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/cons.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/consio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/copyright.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dir.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dataacq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/device_port.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/devicestat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dirent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disk.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/disklabel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/diskslice.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dkstat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/domain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/dvdio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf32.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf64.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_common.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/elf_generic.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/errno.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/event.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventhandler.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/eventvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/exec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/extattr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fcntl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/file.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filedesc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/filio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/gmon.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/imgact_elf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/interrupt.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/inttypes.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioccom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ioctl_compat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ipc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/jail.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/joystick.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kbio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kernel.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kthread.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ktrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/libkern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/linker_set.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/lockf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/malloc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md5.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/memrange.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/module.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mount.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msg.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/msgbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mtio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/namei.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pciio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/pipe.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/poll.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/proc.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/procfs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/protosw.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ptrace.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/queue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/random.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/reboot.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resource.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/resourcevar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rman.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/rtprio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sbuf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/select.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sem.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/shm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signal.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/signalvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/snoop.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socket.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/socketvar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sockio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/soundcard.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/stat.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall-hide.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syscall.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysent.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslimits.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/syslog.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/sysproto.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/systm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/taskqueue.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/termios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/time.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timeb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timepps.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timers.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/times.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/timex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tprintf.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/tty.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttychars.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttycom.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydefaults.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ttydev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/types.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucontext.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/ucred.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/uio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/un.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unistd.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/unpcb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/user.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/utsname.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vmmeter.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnioctl.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/vnode.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wait.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/wormio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/xrpuio.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/kobj.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/link_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/nlist_aout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/mchain.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/fnv_hash.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/iconv.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/sys/md4.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/pmap.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/swap_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_extern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_kern.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_map.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_object.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_page.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pageout.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_param.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vm_zone.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/vm/vnode_pager.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/complex.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/stdbool.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/langinfo.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/netbios.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_conn.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_dev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_rq.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_subr.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_tran.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netsmb/smb_trantcp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/g2c.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/telnet.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/elf-hints.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/libusbhid.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/radlib_vs.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/readpassphrase.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wchar.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/wctype.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cast.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/castsb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptodev.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/cryptosoft.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/deflate.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rijndael.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/rmd160.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/skipjack.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/crypto/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ah_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/esp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipcomp_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipip_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/ipsec6.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_debug.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/key_var.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keydb.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/keysock.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/netipsec/xform.ph
OLD_FILES+=usr/libdata/perl/5.00503/mach/bzlib.ph
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perl5004delta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlapio.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbook.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlbot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlcall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldata.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldebug.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldelta.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldiag.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perldsc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlembed.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq1.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq2.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq3.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq4.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq5.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq6.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq7.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq8.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfaq9.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlform.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlfunc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlguts.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlhist.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlipc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllocale.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perllol.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodinstall.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlmodlib.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlobj.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlop.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlopentut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlpod.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlport.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlre.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlref.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlreftut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlrun.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsec.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlstyle.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsub.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlsyn.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlthrtut.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltie.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoc.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltoot.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perltrap.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlvar.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxs.pod
OLD_FILES+=usr/libdata/perl/5.00503/pod/perlxstut.pod
OLD_FILES+=usr/libdata/perl/5.00503/AnyDBM_File.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/AutoSplit.pm
OLD_FILES+=usr/libdata/perl/5.00503/Benchmark.pm
OLD_FILES+=usr/libdata/perl/5.00503/CGI.pm
OLD_FILES+=usr/libdata/perl/5.00503/CPAN.pm
OLD_FILES+=usr/libdata/perl/5.00503/Carp.pm
OLD_FILES+=usr/libdata/perl/5.00503/Cwd.pm
OLD_FILES+=usr/libdata/perl/5.00503/DirHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/Dumpvalue.pm
OLD_FILES+=usr/libdata/perl/5.00503/English.pm
OLD_FILES+=usr/libdata/perl/5.00503/Env.pm
OLD_FILES+=usr/libdata/perl/5.00503/Exporter.pm
OLD_FILES+=usr/libdata/perl/5.00503/Fatal.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileCache.pm
OLD_FILES+=usr/libdata/perl/5.00503/FileHandle.pm
OLD_FILES+=usr/libdata/perl/5.00503/FindBin.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelectSaver.pm
OLD_FILES+=usr/libdata/perl/5.00503/SelfLoader.pm
OLD_FILES+=usr/libdata/perl/5.00503/Shell.pm
OLD_FILES+=usr/libdata/perl/5.00503/Symbol.pm
OLD_FILES+=usr/libdata/perl/5.00503/Test.pm
OLD_FILES+=usr/libdata/perl/5.00503/UNIVERSAL.pm
OLD_FILES+=usr/libdata/perl/5.00503/abbrev.pl
OLD_FILES+=usr/libdata/perl/5.00503/assert.pl
OLD_FILES+=usr/libdata/perl/5.00503/autouse.pm
OLD_FILES+=usr/libdata/perl/5.00503/base.pm
OLD_FILES+=usr/libdata/perl/5.00503/bigfloat.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigint.pl
OLD_FILES+=usr/libdata/perl/5.00503/bigrat.pl
OLD_FILES+=usr/libdata/perl/5.00503/blib.pm
OLD_FILES+=usr/libdata/perl/5.00503/cacheout.pl
OLD_FILES+=usr/libdata/perl/5.00503/chat2.pl
OLD_FILES+=usr/libdata/perl/5.00503/complete.pl
OLD_FILES+=usr/libdata/perl/5.00503/constant.pm
OLD_FILES+=usr/libdata/perl/5.00503/ctime.pl
OLD_FILES+=usr/libdata/perl/5.00503/diagnostics.pm
OLD_FILES+=usr/libdata/perl/5.00503/dotsh.pl
OLD_FILES+=usr/libdata/perl/5.00503/dumpvar.pl
OLD_FILES+=usr/libdata/perl/5.00503/exceptions.pl
OLD_FILES+=usr/libdata/perl/5.00503/fastcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/fields.pm
OLD_FILES+=usr/libdata/perl/5.00503/find.pl
OLD_FILES+=usr/libdata/perl/5.00503/finddepth.pl
OLD_FILES+=usr/libdata/perl/5.00503/flush.pl
OLD_FILES+=usr/libdata/perl/5.00503/ftp.pl
OLD_FILES+=usr/libdata/perl/5.00503/getcwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/getopts.pl
OLD_FILES+=usr/libdata/perl/5.00503/hostname.pl
OLD_FILES+=usr/libdata/perl/5.00503/importenv.pl
OLD_FILES+=usr/libdata/perl/5.00503/integer.pm
OLD_FILES+=usr/libdata/perl/5.00503/less.pm
OLD_FILES+=usr/libdata/perl/5.00503/lib.pm
OLD_FILES+=usr/libdata/perl/5.00503/locale.pm
OLD_FILES+=usr/libdata/perl/5.00503/look.pl
OLD_FILES+=usr/libdata/perl/5.00503/newgetopt.pl
OLD_FILES+=usr/libdata/perl/5.00503/open2.pl
OLD_FILES+=usr/libdata/perl/5.00503/open3.pl
OLD_FILES+=usr/libdata/perl/5.00503/overload.pm
OLD_FILES+=usr/libdata/perl/5.00503/perl5db.pl
OLD_FILES+=usr/libdata/perl/5.00503/pwd.pl
OLD_FILES+=usr/libdata/perl/5.00503/shellwords.pl
OLD_FILES+=usr/libdata/perl/5.00503/sigtrap.pm
OLD_FILES+=usr/libdata/perl/5.00503/stat.pl
OLD_FILES+=usr/libdata/perl/5.00503/strict.pm
OLD_FILES+=usr/libdata/perl/5.00503/subs.pm
OLD_FILES+=usr/libdata/perl/5.00503/syslog.pl
OLD_FILES+=usr/libdata/perl/5.00503/tainted.pl
OLD_FILES+=usr/libdata/perl/5.00503/termcap.pl
OLD_FILES+=usr/libdata/perl/5.00503/timelocal.pl
OLD_FILES+=usr/libdata/perl/5.00503/validate.pl
OLD_FILES+=usr/libdata/perl/5.00503/vars.pm
OLD_FILES+=usr/libdata/perl/5.00503/re.pm
OLD_FILES+=usr/libdata/perl/5.00503/Config.pm
OLD_FILES+=usr/libdata/perl/5.00503/.exists
OLD_FILES+=usr/libdata/perl/5.00503/DynaLoader.pm
OLD_FILES+=usr/share/perl/man/man3/AnyDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/AutoSplit.3.gz
OLD_FILES+=usr/share/perl/man/man3/B.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Asmdata.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Assembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bblock.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Bytecode.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::C.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::CC.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Debug.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Deparse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Disassembler.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Lint.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Showlex.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stackobj.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Terse.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Xref.3.gz
OLD_FILES+=usr/share/perl/man/man3/Benchmark.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Apache.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Cookie.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Fast.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Push.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Switch.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::FirstTime.3.gz
OLD_FILES+=usr/share/perl/man/man3/CPAN::Nox.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp.3.gz
OLD_FILES+=usr/share/perl/man/man3/Class::Struct.3.gz
OLD_FILES+=usr/share/perl/man/man3/Config.3.gz
OLD_FILES+=usr/share/perl/man/man3/Cwd.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Data::Dumper.3.gz
OLD_FILES+=usr/share/perl/man/man3/Devel::SelfStubber.3.gz
OLD_FILES+=usr/share/perl/man/man3/DirHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Dumpvalue.3.gz
OLD_FILES+=usr/share/perl/man/man3/DynaLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/English.3.gz
OLD_FILES+=usr/share/perl/man/man3/Env.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Command.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Embed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Install.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Installed.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Liblist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MakeMaker.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Manifest.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mkbootstrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Mksymlists.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::Packlist.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::testlib.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fatal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Fcntl.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Basename.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::CheckTree.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Compare.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Copy.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::DosGlob.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Path.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Mac.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::OS2.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Unix.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::VMS.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Spec::Win32.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::stat.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileCache.3.gz
OLD_FILES+=usr/share/perl/man/man3/FileHandle.3.gz
OLD_FILES+=usr/share/perl/man/man3/FindBin.3.gz
OLD_FILES+=usr/share/perl/man/man3/GDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Long.3.gz
OLD_FILES+=usr/share/perl/man/man3/Getopt::Std.3.gz
OLD_FILES+=usr/share/perl/man/man3/I18N::Collate.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::File.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Pipe.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Seekable.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Msg.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open2.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Open3.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/IPC::SysV.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigFloat.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::BigInt.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Complex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Math::Trig.3.gz
OLD_FILES+=usr/share/perl/man/man3/NDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::Ping.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::hostent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::netent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::protoent.3.gz
OLD_FILES+=usr/share/perl/man/man3/Net::servent.3.gz
OLD_FILES+=usr/share/perl/man/man3/O.3.gz
OLD_FILES+=usr/share/perl/man/man3/ODBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Opcode.3.gz
OLD_FILES+=usr/share/perl/man/man3/POSIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Html.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text.3.gz
OLD_FILES+=usr/share/perl/man/man3/SDBM_File.3.gz
OLD_FILES+=usr/share/perl/man/man3/Safe.3.gz
OLD_FILES+=usr/share/perl/man/man3/Search::Dict.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelectSaver.3.gz
OLD_FILES+=usr/share/perl/man/man3/SelfLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/Shell.3.gz
OLD_FILES+=usr/share/perl/man/man3/Socket.3.gz
OLD_FILES+=usr/share/perl/man/man3/Symbol.3.gz
OLD_FILES+=usr/share/perl/man/man3/re.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/Sys::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Cap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::Complete.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ReadLine.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test.3.gz
OLD_FILES+=usr/share/perl/man/man3/Test::Harness.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Abbrev.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::ParseWords.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Soundex.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Tabs.3.gz
OLD_FILES+=usr/share/perl/man/man3/Text::Wrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Queue.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Semaphore.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Signal.3.gz
OLD_FILES+=usr/share/perl/man/man3/Thread::Specific.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Array.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Handle.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Hash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::RefHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::Scalar.3.gz
OLD_FILES+=usr/share/perl/man/man3/Tie::SubstrHash.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::Local.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::gmtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::localtime.3.gz
OLD_FILES+=usr/share/perl/man/man3/Time::tm.3.gz
OLD_FILES+=usr/share/perl/man/man3/UNIVERSAL.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::grent.3.gz
OLD_FILES+=usr/share/perl/man/man3/User::pwent.3.gz
OLD_FILES+=usr/share/perl/man/man3/attrs.3.gz
OLD_FILES+=usr/share/perl/man/man3/autouse.3.gz
OLD_FILES+=usr/share/perl/man/man3/base.3.gz
OLD_FILES+=usr/share/perl/man/man3/blib.3.gz
OLD_FILES+=usr/share/perl/man/man3/constant.3.gz
OLD_FILES+=usr/share/perl/man/man3/diagnostics.3.gz
OLD_FILES+=usr/share/perl/man/man3/fields.3.gz
OLD_FILES+=usr/share/perl/man/man3/integer.3.gz
OLD_FILES+=usr/share/perl/man/man3/less.3.gz
OLD_FILES+=usr/share/perl/man/man3/lib.3.gz
OLD_FILES+=usr/share/perl/man/man3/locale.3.gz
OLD_FILES+=usr/share/perl/man/man3/ops.3.gz
OLD_FILES+=usr/share/perl/man/man3/overload.3.gz
OLD_FILES+=usr/share/perl/man/man3/sigtrap.3.gz
OLD_FILES+=usr/share/perl/man/man3/strict.3.gz
OLD_FILES+=usr/share/perl/man/man3/subs.3.gz
OLD_FILES+=usr/share/perl/man/man3/vars.3.gz
OLD_FILES+=usr/share/perl/man/man3/B::Stash.3.gz
OLD_FILES+=usr/share/perl/man/man3/ByteLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/CGI::Pretty.3.gz
OLD_FILES+=usr/share/perl/man/man3/Carp::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/DB.3.gz
OLD_FILES+=usr/share/perl/man/man3/DProf::DProf.3.gz
OLD_FILES+=usr/share/perl/man/man3/Exporter::Heavy.3.gz
OLD_FILES+=usr/share/perl/man/man3/ExtUtils::MM_Cygwin.3.gz
OLD_FILES+=usr/share/perl/man/man3/File::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Glob::Glob.3.gz
OLD_FILES+=usr/share/perl/man/man3/Hostname::Hostname.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Dir.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Poll.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::INET.3.gz
OLD_FILES+=usr/share/perl/man/man3/IO::Socket::UNIX.3.gz
OLD_FILES+=usr/share/perl/man/man3/Peek::Peek.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Checker.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Find.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::InputObjects.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Man.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::ParseUtils.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Parser.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Plainer.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Select.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Color.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Text::Termcap.3.gz
OLD_FILES+=usr/share/perl/man/man3/Pod::Usage.3.gz
OLD_FILES+=usr/share/perl/man/man3/Syslog::Syslog.3.gz
OLD_FILES+=usr/share/perl/man/man3/Term::ANSIColor.3.gz
OLD_FILES+=usr/share/perl/man/man3/XSLoader.3.gz
OLD_FILES+=usr/share/perl/man/man3/attributes.3.gz
OLD_FILES+=usr/share/perl/man/man3/bytes.3.gz
OLD_FILES+=usr/share/perl/man/man3/charnames.3.gz
OLD_FILES+=usr/share/perl/man/man3/filetest.3.gz
OLD_FILES+=usr/share/perl/man/man3/open.3.gz
OLD_FILES+=usr/share/perl/man/man3/utf8.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings.3.gz
OLD_FILES+=usr/share/perl/man/man3/warnings::register.3.gz
OLD_FILES+=usr/share/perl/man/whatis
OLD_FILES+=usr/share/man/man1/CA.pl.1.gz
OLD_FILES+=usr/share/man/man1/asn1parse.1.gz
OLD_FILES+=usr/share/man/man1/ca.1.gz
OLD_FILES+=usr/share/man/man1/ciphers.1.gz
OLD_FILES+=usr/share/man/man1/config.1.gz
OLD_FILES+=usr/share/man/man1/crl.1.gz
OLD_FILES+=usr/share/man/man1/crl2pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/dgst.1.gz
OLD_FILES+=usr/share/man/man1/dhparam.1.gz
OLD_FILES+=usr/share/man/man1/doscmd.1.gz
OLD_FILES+=usr/share/man/man1/dsa.1.gz
OLD_FILES+=usr/share/man/man1/dsaparam.1.gz
OLD_FILES+=usr/share/man/man1/enc.1.gz
OLD_FILES+=usr/share/man/man1/gendsa.1.gz
OLD_FILES+=usr/share/man/man1/genrsa.1.gz
OLD_FILES+=usr/share/man/man1/getNAME.1.gz
OLD_FILES+=usr/share/man/man1/nseq.1.gz
OLD_FILES+=usr/share/man/man1/ocsp.1.gz
OLD_FILES+=usr/share/man/man1/openssl.1.gz
OLD_FILES+=usr/share/man/man1/perl.1.gz
OLD_FILES+=usr/share/man/man1/perl5004delta.1.gz
OLD_FILES+=usr/share/man/man1/perlapio.1.gz
OLD_FILES+=usr/share/man/man1/perlbook.1.gz
OLD_FILES+=usr/share/man/man1/perlbot.1.gz
OLD_FILES+=usr/share/man/man1/perlcall.1.gz
OLD_FILES+=usr/share/man/man1/perldata.1.gz
OLD_FILES+=usr/share/man/man1/perldebug.1.gz
OLD_FILES+=usr/share/man/man1/perldelta.1.gz
OLD_FILES+=usr/share/man/man1/perldiag.1.gz
OLD_FILES+=usr/share/man/man1/perldsc.1.gz
OLD_FILES+=usr/share/man/man1/perlembed.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq1.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq2.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq3.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq4.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq5.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq6.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq7.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq8.1.gz
OLD_FILES+=usr/share/man/man1/perlfaq9.1.gz
OLD_FILES+=usr/share/man/man1/perlform.1.gz
OLD_FILES+=usr/share/man/man1/perlfunc.1.gz
OLD_FILES+=usr/share/man/man1/perlguts.1.gz
OLD_FILES+=usr/share/man/man1/perlhist.1.gz
OLD_FILES+=usr/share/man/man1/perlipc.1.gz
OLD_FILES+=usr/share/man/man1/perllocale.1.gz
OLD_FILES+=usr/share/man/man1/perllol.1.gz
OLD_FILES+=usr/share/man/man1/perlmod.1.gz
OLD_FILES+=usr/share/man/man1/perlmodinstall.1.gz
OLD_FILES+=usr/share/man/man1/perlmodlib.1.gz
OLD_FILES+=usr/share/man/man1/perlobj.1.gz
OLD_FILES+=usr/share/man/man1/perlop.1.gz
OLD_FILES+=usr/share/man/man1/perlopentut.1.gz
OLD_FILES+=usr/share/man/man1/perlpod.1.gz
OLD_FILES+=usr/share/man/man1/perlport.1.gz
OLD_FILES+=usr/share/man/man1/perlre.1.gz
OLD_FILES+=usr/share/man/man1/perlref.1.gz
OLD_FILES+=usr/share/man/man1/perlreftut.1.gz
OLD_FILES+=usr/share/man/man1/perlrun.1.gz
OLD_FILES+=usr/share/man/man1/perlsec.1.gz
OLD_FILES+=usr/share/man/man1/perlstyle.1.gz
OLD_FILES+=usr/share/man/man1/perlsub.1.gz
OLD_FILES+=usr/share/man/man1/perlsyn.1.gz
OLD_FILES+=usr/share/man/man1/perlthrtut.1.gz
OLD_FILES+=usr/share/man/man1/perltie.1.gz
OLD_FILES+=usr/share/man/man1/perltoc.1.gz
OLD_FILES+=usr/share/man/man1/perltoot.1.gz
OLD_FILES+=usr/share/man/man1/perltrap.1.gz
OLD_FILES+=usr/share/man/man1/perlvar.1.gz
OLD_FILES+=usr/share/man/man1/perlxs.1.gz
OLD_FILES+=usr/share/man/man1/perlxstut.1.gz
OLD_FILES+=usr/share/man/man1/perlbug.1.gz
OLD_FILES+=usr/share/man/man1/perlcc.1.gz
OLD_FILES+=usr/share/man/man1/perldoc.1.gz
OLD_FILES+=usr/share/man/man1/perl5005delta.1.gz
OLD_FILES+=usr/share/man/man1/perlfork.1.gz
OLD_FILES+=usr/share/man/man1/perlboot.1.gz
OLD_FILES+=usr/share/man/man1/perltootc.1.gz
OLD_FILES+=usr/share/man/man1/perldbmfilter.1.gz
OLD_FILES+=usr/share/man/man1/perldebguts.1.gz
OLD_FILES+=usr/share/man/man1/perlnumber.1.gz
OLD_FILES+=usr/share/man/man1/perlcompile.1.gz
OLD_FILES+=usr/share/man/man1/perltodo.1.gz
OLD_FILES+=usr/share/man/man1/perlapi.1.gz
OLD_FILES+=usr/share/man/man1/perlintern.1.gz
OLD_FILES+=usr/share/man/man1/perlhack.1.gz
OLD_FILES+=usr/share/man/man1/perlbc.1.gz
OLD_FILES+=usr/share/man/man1/pkcs12.1.gz
OLD_FILES+=usr/share/man/man1/pkcs7.1.gz
OLD_FILES+=usr/share/man/man1/pkcs8.1.gz
OLD_FILES+=usr/share/man/man1/rand.1.gz
OLD_FILES+=usr/share/man/man1/req.1.gz
OLD_FILES+=usr/share/man/man1/rsa.1.gz
OLD_FILES+=usr/share/man/man1/rsautl.1.gz
OLD_FILES+=usr/share/man/man1/s_client.1.gz
OLD_FILES+=usr/share/man/man1/s_server.1.gz
OLD_FILES+=usr/share/man/man1/sess_id.1.gz
OLD_FILES+=usr/share/man/man1/smime.1.gz
OLD_FILES+=usr/share/man/man1/speed.1.gz
OLD_FILES+=usr/share/man/man1/spkac.1.gz
OLD_FILES+=usr/share/man/man1/verify.1.gz
OLD_FILES+=usr/share/man/man1/version.1.gz
OLD_FILES+=usr/share/man/man1/x509.1.gz
OLD_FILES+=usr/share/man/man3/SSL_COMP_add_compression_method.3.gz
OLD_FILES+=usr/share/man/man3/SSL_CTX_get_ex_new_index.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_dup.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_set_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_entry_tartype.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_data_into_file.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_open_tar.3.gz
OLD_FILES+=usr/share/man/man3/archive_read_support_format_gnutar.3.gz
OLD_FILES+=usr/share/man/man3/cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_cipher.3.gz
OLD_FILES+=usr/share/man/man3/des_setkey.3.gz
OLD_FILES+=usr/share/man/man3/encrypt.3.gz
OLD_FILES+=usr/share/man/man3/endvfsent.3.gz
OLD_FILES+=usr/share/man/man3/getvfsbytype.3.gz
OLD_FILES+=usr/share/man/man3/getvfsent.3.gz
OLD_FILES+=usr/share/man/man3/isnanf.3.gz
OLD_FILES+=usr/share/man/man3/libautofs.3.gz
OLD_FILES+=usr/share/man/man3/pthread_attr_setsstack.3.gz
OLD_FILES+=usr/share/man/man3/pthread_getcancelstate.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_getpshared.3.gz
OLD_FILES+=usr/share/man/man3/pthread_mutexattr_setpshared.3.gz
OLD_FILES+=usr/share/man/man3/set_assertion_failure_callback.3.gz
OLD_FILES+=usr/share/man/man3/setkey.3.gz
OLD_FILES+=usr/share/man/man3/setvfsent.3.gz
OLD_FILES+=usr/share/man/man3/ssl.3.gz
OLD_FILES+=usr/share/man/man3/vfsisloadable.3.gz
OLD_FILES+=usr/share/man/man3/vfsload.3.gz
OLD_FILES+=usr/share/man/man4/als4000.4.gz
OLD_FILES+=usr/share/man/man4/csa.4.gz
OLD_FILES+=usr/share/man/man4/emu10k1.4.gz
OLD_FILES+=usr/share/man/man4/euc.4.gz
OLD_FILES+=usr/share/man/man4/gusc.4.gz
OLD_FILES+=usr/share/man/man4/if_fwp.4.gz
OLD_FILES+=usr/share/man/man4/lomac.4.gz
OLD_FILES+=usr/share/man/man4/maestro3.4.gz
OLD_FILES+=usr/share/man/man4/raid.4.gz
OLD_FILES+=usr/share/man/man4/sbc.4.gz
OLD_FILES+=usr/share/man/man4/sd.4.gz
OLD_FILES+=usr/share/man/man4/snc.4.gz
OLD_FILES+=usr/share/man/man4/st.4.gz
OLD_FILES+=usr/share/man/man4/uaudio.4.gz
OLD_FILES+=usr/share/man/man4/utf2.4.gz
OLD_FILES+=usr/share/man/man4/vinumdebug.4.gz
OLD_FILES+=usr/share/man/man5/disklabel.5.gz
OLD_FILES+=usr/share/man/man5/dm.conf.5.gz
OLD_FILES+=usr/share/man/man5/ranlib.5.gz
OLD_FILES+=usr/share/man/man5/utf2.5.gz
OLD_FILES+=usr/share/man/man7/groff_mwww.7.gz
OLD_FILES+=usr/share/man/man7/mmroff.7.gz
OLD_FILES+=usr/share/man/man7/mwww.7.gz
OLD_FILES+=usr/share/man/man7/style.perl.7.gz
OLD_FILES+=usr/share/man/man8/apm.8.gz
OLD_FILES+=usr/share/man/man8/apmconf.8.gz
OLD_FILES+=usr/share/man/man8/apmd.8.gz
OLD_FILES+=usr/share/man/man8/dm.8.gz
OLD_FILES+=usr/share/man/man8/pam_ftp.8.gz
OLD_FILES+=usr/share/man/man8/pam_wheel.8.gz
OLD_FILES+=usr/share/man/man8/sconfig.8.gz
OLD_FILES+=usr/share/man/man8/ssl.8.gz
OLD_FILES+=usr/share/man/man8/wlconfig.8.gz
OLD_FILES+=usr/share/man/man9/CURSIG.9.gz
OLD_FILES+=usr/share/man/man9/VFS_INIT.9.gz
OLD_FILES+=usr/share/man/man9/at_exit.9.gz
OLD_FILES+=usr/share/man/man9/at_fork.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_add.9.gz
OLD_FILES+=usr/share/man/man9/cdevsw_remove.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_empty.9.gz
OLD_FILES+=usr/share/man/man9/cv_waitq_remove.9.gz
OLD_FILES+=usr/share/man/man9/endtsleep.9.gz
OLD_FILES+=usr/share/man/man9/jumbo.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_freem.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_alloc.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_free.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_pg_steal.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_phys_to_kva.9.gz
OLD_FILES+=usr/share/man/man9/jumbo_vm_init.9.gz
OLD_FILES+=usr/share/man/man9/mac_biba.9.gz
OLD_FILES+=usr/share/man/man9/mac_bsdextended.9.gz
OLD_FILES+=usr/share/man/man9/mono_time.9.gz
OLD_FILES+=usr/share/man/man9/p1003_1b.9.gz
OLD_FILES+=usr/share/man/man9/pmap_prefault.9.gz
OLD_FILES+=usr/share/man/man9/posix4.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_name.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_string.9.gz
OLD_FILES+=usr/share/man/man9/resource_query_unit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_exit.9.gz
OLD_FILES+=usr/share/man/man9/rm_at_fork.9.gz
OLD_FILES+=usr/share/man/man9/runtime.9.gz
OLD_FILES+=usr/share/man/man9/sleepinit.9.gz
OLD_FILES+=usr/share/man/man9/unsleep.9.gz
OLD_FILES+=usr/share/man/ja/man1/perl.1.gz
OLD_FILES+=usr/share/games/atc/Game_List
OLD_FILES+=usr/share/games/atc/Killer
OLD_FILES+=usr/share/games/atc/crossover
OLD_FILES+=usr/share/games/atc/default
OLD_FILES+=usr/share/games/atc/easy
OLD_FILES+=usr/share/games/atc/game_2
OLD_FILES+=usr/share/games/larn/larnmaze
OLD_FILES+=usr/share/games/larn/larnopts
OLD_FILES+=usr/share/games/larn/larn.help
OLD_FILES+=usr/share/games/quiz.db/africa
OLD_FILES+=usr/share/games/quiz.db/america
OLD_FILES+=usr/share/games/quiz.db/areas
OLD_FILES+=usr/share/games/quiz.db/arith
OLD_FILES+=usr/share/games/quiz.db/asia
OLD_FILES+=usr/share/games/quiz.db/babies
OLD_FILES+=usr/share/games/quiz.db/bard
OLD_FILES+=usr/share/games/quiz.db/chinese
OLD_FILES+=usr/share/games/quiz.db/collectives
OLD_FILES+=usr/share/games/quiz.db/ed
OLD_FILES+=usr/share/games/quiz.db/elements
OLD_FILES+=usr/share/games/quiz.db/europe
OLD_FILES+=usr/share/games/quiz.db/flowers
OLD_FILES+=usr/share/games/quiz.db/greek
OLD_FILES+=usr/share/games/quiz.db/inca
OLD_FILES+=usr/share/games/quiz.db/index
OLD_FILES+=usr/share/games/quiz.db/latin
OLD_FILES+=usr/share/games/quiz.db/locomotive
OLD_FILES+=usr/share/games/quiz.db/midearth
OLD_FILES+=usr/share/games/quiz.db/morse
OLD_FILES+=usr/share/games/quiz.db/murders
OLD_FILES+=usr/share/games/quiz.db/poetry
OLD_FILES+=usr/share/games/quiz.db/posneg
OLD_FILES+=usr/share/games/quiz.db/pres
OLD_FILES+=usr/share/games/quiz.db/province
OLD_FILES+=usr/share/games/quiz.db/seq-easy
OLD_FILES+=usr/share/games/quiz.db/seq-hard
OLD_FILES+=usr/share/games/quiz.db/sexes
OLD_FILES+=usr/share/games/quiz.db/sov
OLD_FILES+=usr/share/games/quiz.db/spell
OLD_FILES+=usr/share/games/quiz.db/state
OLD_FILES+=usr/share/games/quiz.db/trek
OLD_FILES+=usr/share/games/quiz.db/ucc
OLD_FILES+=usr/share/games/cribbage.instr
OLD_FILES+=usr/share/games/fish.instr
OLD_FILES+=usr/share/games/wump.info
OLD_FILES+=usr/games/hide/adventure
OLD_FILES+=usr/games/hide/arithmetic
OLD_FILES+=usr/games/hide/atc
OLD_FILES+=usr/games/hide/backgammon
OLD_FILES+=usr/games/hide/teachgammon
OLD_FILES+=usr/games/hide/battlestar
OLD_FILES+=usr/games/hide/bs
OLD_FILES+=usr/games/hide/canfield
OLD_FILES+=usr/games/hide/cribbage
OLD_FILES+=usr/games/hide/fish
OLD_FILES+=usr/games/hide/hack
OLD_FILES+=usr/games/hide/hangman
OLD_FILES+=usr/games/hide/larn
OLD_FILES+=usr/games/hide/mille
OLD_FILES+=usr/games/hide/phantasia
OLD_FILES+=usr/games/hide/quiz
OLD_FILES+=usr/games/hide/robots
OLD_FILES+=usr/games/hide/rogue
OLD_FILES+=usr/games/hide/sail
OLD_FILES+=usr/games/hide/snake
OLD_FILES+=usr/games/hide/trek
OLD_FILES+=usr/games/hide/worm
OLD_FILES+=usr/games/hide/wump
OLD_FILES+=usr/games/adventure
OLD_FILES+=usr/games/arithmetic
OLD_FILES+=usr/games/atc
OLD_FILES+=usr/games/backgammon
OLD_FILES+=usr/games/teachgammon
OLD_FILES+=usr/games/battlestar
OLD_FILES+=usr/games/bs
OLD_FILES+=usr/games/canfield
OLD_FILES+=usr/games/cfscores
OLD_FILES+=usr/games/cribbage
OLD_FILES+=usr/games/dm
OLD_FILES+=usr/games/fish
OLD_FILES+=usr/games/hack
OLD_FILES+=usr/games/hangman
OLD_FILES+=usr/games/larn
OLD_FILES+=usr/games/mille
OLD_FILES+=usr/games/phantasia
OLD_FILES+=usr/games/piano
OLD_FILES+=usr/games/pig
OLD_FILES+=usr/games/quiz
OLD_FILES+=usr/games/rain
OLD_FILES+=usr/games/robots
OLD_FILES+=usr/games/rogue
OLD_FILES+=usr/games/sail
OLD_FILES+=usr/games/snake
OLD_FILES+=usr/games/snscore
OLD_FILES+=usr/games/trek
OLD_FILES+=usr/games/wargames
OLD_FILES+=usr/games/worm
OLD_FILES+=usr/games/worms
OLD_FILES+=usr/games/wump
OLD_FILES+=sbin/mount_reiserfs
OLD_FILES+=usr/include/cam/cam_extend.h
OLD_FILES+=usr/include/dev/wi/wi_hostap.h
OLD_FILES+=usr/include/disktab.h
OLD_FILES+=usr/include/g++/FlexLexer.h
OLD_FILES+=usr/include/g++/PlotFile.h
OLD_FILES+=usr/include/g++/SFile.h
OLD_FILES+=usr/include/g++/_G_config.h
OLD_FILES+=usr/include/g++/algo.h
OLD_FILES+=usr/include/g++/algobase.h
OLD_FILES+=usr/include/g++/algorithm
OLD_FILES+=usr/include/g++/alloc.h
OLD_FILES+=usr/include/g++/bitset
OLD_FILES+=usr/include/g++/builtinbuf.h
OLD_FILES+=usr/include/g++/bvector.h
OLD_FILES+=usr/include/g++/cassert
OLD_FILES+=usr/include/g++/cctype
OLD_FILES+=usr/include/g++/cerrno
OLD_FILES+=usr/include/g++/cfloat
OLD_FILES+=usr/include/g++/ciso646
OLD_FILES+=usr/include/g++/climits
OLD_FILES+=usr/include/g++/clocale
OLD_FILES+=usr/include/g++/cmath
OLD_FILES+=usr/include/g++/complex
OLD_FILES+=usr/include/g++/complex.h
OLD_FILES+=usr/include/g++/csetjmp
OLD_FILES+=usr/include/g++/csignal
OLD_FILES+=usr/include/g++/cstdarg
OLD_FILES+=usr/include/g++/cstddef
OLD_FILES+=usr/include/g++/cstdio
OLD_FILES+=usr/include/g++/cstdlib
OLD_FILES+=usr/include/g++/cstring
OLD_FILES+=usr/include/g++/ctime
OLD_FILES+=usr/include/g++/cwchar
OLD_FILES+=usr/include/g++/cwctype
OLD_FILES+=usr/include/g++/defalloc.h
OLD_FILES+=usr/include/g++/deque
OLD_FILES+=usr/include/g++/deque.h
OLD_FILES+=usr/include/g++/editbuf.h
OLD_FILES+=usr/include/g++/exception
OLD_FILES+=usr/include/g++/floatio.h
OLD_FILES+=usr/include/g++/fstream
OLD_FILES+=usr/include/g++/fstream.h
OLD_FILES+=usr/include/g++/function.h
OLD_FILES+=usr/include/g++/functional
OLD_FILES+=usr/include/g++/hash_map
OLD_FILES+=usr/include/g++/hash_map.h
OLD_FILES+=usr/include/g++/hash_set
OLD_FILES+=usr/include/g++/hash_set.h
OLD_FILES+=usr/include/g++/hashtable.h
OLD_FILES+=usr/include/g++/heap.h
OLD_FILES+=usr/include/g++/indstream.h
OLD_FILES+=usr/include/g++/iolibio.h
OLD_FILES+=usr/include/g++/iomanip
OLD_FILES+=usr/include/g++/iomanip.h
OLD_FILES+=usr/include/g++/iosfwd
OLD_FILES+=usr/include/g++/iostdio.h
OLD_FILES+=usr/include/g++/iostream
OLD_FILES+=usr/include/g++/iostream.h
OLD_FILES+=usr/include/g++/iostreamP.h
OLD_FILES+=usr/include/g++/istream.h
OLD_FILES+=usr/include/g++/iterator
OLD_FILES+=usr/include/g++/iterator.h
OLD_FILES+=usr/include/g++/libio.h
OLD_FILES+=usr/include/g++/libioP.h
OLD_FILES+=usr/include/g++/list
OLD_FILES+=usr/include/g++/list.h
OLD_FILES+=usr/include/g++/map
OLD_FILES+=usr/include/g++/map.h
OLD_FILES+=usr/include/g++/memory
OLD_FILES+=usr/include/g++/multimap.h
OLD_FILES+=usr/include/g++/multiset.h
OLD_FILES+=usr/include/g++/new
OLD_FILES+=usr/include/g++/new.h
OLD_FILES+=usr/include/g++/numeric
OLD_FILES+=usr/include/g++/ostream.h
OLD_FILES+=usr/include/g++/pair.h
OLD_FILES+=usr/include/g++/parsestream.h
OLD_FILES+=usr/include/g++/pfstream.h
OLD_FILES+=usr/include/g++/procbuf.h
OLD_FILES+=usr/include/g++/pthread_alloc
OLD_FILES+=usr/include/g++/pthread_alloc.h
OLD_FILES+=usr/include/g++/queue
OLD_FILES+=usr/include/g++/rope
OLD_FILES+=usr/include/g++/rope.h
OLD_FILES+=usr/include/g++/ropeimpl.h
OLD_FILES+=usr/include/g++/set
OLD_FILES+=usr/include/g++/set.h
OLD_FILES+=usr/include/g++/slist
OLD_FILES+=usr/include/g++/slist.h
OLD_FILES+=usr/include/g++/sstream
OLD_FILES+=usr/include/g++/stack
OLD_FILES+=usr/include/g++/stack.h
OLD_FILES+=usr/include/g++/std/bastring.cc
OLD_FILES+=usr/include/g++/std/bastring.h
OLD_FILES+=usr/include/g++/std/complext.cc
OLD_FILES+=usr/include/g++/std/complext.h
OLD_FILES+=usr/include/g++/std/dcomplex.h
OLD_FILES+=usr/include/g++/std/fcomplex.h
OLD_FILES+=usr/include/g++/std/gslice.h
OLD_FILES+=usr/include/g++/std/gslice_array.h
OLD_FILES+=usr/include/g++/std/indirect_array.h
OLD_FILES+=usr/include/g++/std/ldcomplex.h
OLD_FILES+=usr/include/g++/std/mask_array.h
OLD_FILES+=usr/include/g++/std/slice.h
OLD_FILES+=usr/include/g++/std/slice_array.h
OLD_FILES+=usr/include/g++/std/std_valarray.h
OLD_FILES+=usr/include/g++/std/straits.h
OLD_FILES+=usr/include/g++/std/valarray_array.h
OLD_FILES+=usr/include/g++/std/valarray_array.tcc
OLD_FILES+=usr/include/g++/std/valarray_meta.h
OLD_FILES+=usr/include/g++/stdexcept
OLD_FILES+=usr/include/g++/stdiostream.h
OLD_FILES+=usr/include/g++/stl.h
OLD_FILES+=usr/include/g++/stl_algo.h
OLD_FILES+=usr/include/g++/stl_algobase.h
OLD_FILES+=usr/include/g++/stl_alloc.h
OLD_FILES+=usr/include/g++/stl_bvector.h
OLD_FILES+=usr/include/g++/stl_config.h
OLD_FILES+=usr/include/g++/stl_construct.h
OLD_FILES+=usr/include/g++/stl_deque.h
OLD_FILES+=usr/include/g++/stl_function.h
OLD_FILES+=usr/include/g++/stl_hash_fun.h
OLD_FILES+=usr/include/g++/stl_hash_map.h
OLD_FILES+=usr/include/g++/stl_hash_set.h
OLD_FILES+=usr/include/g++/stl_hashtable.h
OLD_FILES+=usr/include/g++/stl_heap.h
OLD_FILES+=usr/include/g++/stl_iterator.h
OLD_FILES+=usr/include/g++/stl_list.h
OLD_FILES+=usr/include/g++/stl_map.h
OLD_FILES+=usr/include/g++/stl_multimap.h
OLD_FILES+=usr/include/g++/stl_multiset.h
OLD_FILES+=usr/include/g++/stl_numeric.h
OLD_FILES+=usr/include/g++/stl_pair.h
OLD_FILES+=usr/include/g++/stl_queue.h
OLD_FILES+=usr/include/g++/stl_raw_storage_iter.h
OLD_FILES+=usr/include/g++/stl_relops.h
OLD_FILES+=usr/include/g++/stl_rope.h
OLD_FILES+=usr/include/g++/stl_set.h
OLD_FILES+=usr/include/g++/stl_slist.h
OLD_FILES+=usr/include/g++/stl_stack.h
OLD_FILES+=usr/include/g++/stl_tempbuf.h
OLD_FILES+=usr/include/g++/stl_tree.h
OLD_FILES+=usr/include/g++/stl_uninitialized.h
OLD_FILES+=usr/include/g++/stl_vector.h
OLD_FILES+=usr/include/g++/stream.h
OLD_FILES+=usr/include/g++/streambuf.h
OLD_FILES+=usr/include/g++/strfile.h
OLD_FILES+=usr/include/g++/string
OLD_FILES+=usr/include/g++/strstream
OLD_FILES+=usr/include/g++/strstream.h
OLD_FILES+=usr/include/g++/tempbuf.h
OLD_FILES+=usr/include/g++/tree.h
OLD_FILES+=usr/include/g++/type_traits.h
OLD_FILES+=usr/include/g++/typeinfo
OLD_FILES+=usr/include/g++/utility
OLD_FILES+=usr/include/g++/valarray
OLD_FILES+=usr/include/g++/vector
OLD_FILES+=usr/include/g++/vector.h
OLD_FILES+=usr/include/gmp.h
OLD_FILES+=usr/include/isc/assertions.h
OLD_FILES+=usr/include/isc/ctl.h
OLD_FILES+=usr/include/isc/dst.h
OLD_FILES+=usr/include/isc/eventlib.h
OLD_FILES+=usr/include/isc/heap.h
OLD_FILES+=usr/include/isc/irpmarshall.h
OLD_FILES+=usr/include/isc/list.h
OLD_FILES+=usr/include/isc/logging.h
OLD_FILES+=usr/include/isc/memcluster.h
OLD_FILES+=usr/include/isc/misc.h
OLD_FILES+=usr/include/isc/tree.h
OLD_FILES+=usr/include/machine/ansi.h
OLD_FILES+=usr/include/machine/apic.h
OLD_FILES+=usr/include/machine/asc_ioctl.h
OLD_FILES+=usr/include/machine/asnames.h
OLD_FILES+=usr/include/machine/bus_at386.h
OLD_FILES+=usr/include/machine/bus_memio.h
OLD_FILES+=usr/include/machine/bus_pc98.h
OLD_FILES+=usr/include/machine/bus_pio.h
OLD_FILES+=usr/include/machine/cdk.h
OLD_FILES+=usr/include/machine/comstats.h
OLD_FILES+=usr/include/machine/console.h
OLD_FILES+=usr/include/machine/critical.h
OLD_FILES+=usr/include/machine/cronyx.h
OLD_FILES+=usr/include/machine/dvcfg.h
OLD_FILES+=usr/include/machine/globaldata.h
OLD_FILES+=usr/include/machine/globals.h
OLD_FILES+=usr/include/machine/gsc.h
OLD_FILES+=usr/include/machine/i4b_isppp.h
OLD_FILES+=usr/include/machine/if_wavelan_ieee.h
OLD_FILES+=usr/include/machine/iic.h
OLD_FILES+=usr/include/machine/ioctl_ctx.h
OLD_FILES+=usr/include/machine/ioctl_fd.h
OLD_FILES+=usr/include/machine/ipl.h
OLD_FILES+=usr/include/machine/lock.h
OLD_FILES+=usr/include/machine/mouse.h
OLD_FILES+=usr/include/machine/mpapic.h
OLD_FILES+=usr/include/machine/mtpr.h
OLD_FILES+=usr/include/machine/pc/msdos.h
OLD_FILES+=usr/include/machine/physio_proc.h
OLD_FILES+=usr/include/machine/smb.h
OLD_FILES+=usr/include/machine/spigot.h
OLD_FILES+=usr/include/machine/types.h
OLD_FILES+=usr/include/machine/uc_device.h
OLD_FILES+=usr/include/machine/ultrasound.h
OLD_FILES+=usr/include/machine/wtio.h
OLD_FILES+=usr/include/msdosfs/bootsect.h
OLD_FILES+=usr/include/msdosfs/bpb.h
OLD_FILES+=usr/include/msdosfs/denode.h
OLD_FILES+=usr/include/msdosfs/direntry.h
OLD_FILES+=usr/include/msdosfs/fat.h
OLD_FILES+=usr/include/msdosfs/msdosfsmount.h
OLD_FILES+=usr/include/net/hostcache.h
OLD_FILES+=usr/include/net/if_faith.h
OLD_FILES+=usr/include/net/if_ieee80211.h
OLD_FILES+=usr/include/net/if_tunvar.h
OLD_FILES+=usr/include/net/intrq.h
OLD_FILES+=usr/include/netatm/kern_include.h
OLD_FILES+=usr/include/netinet/if_fddi.h
OLD_FILES+=usr/include/netinet/in_hostcache.h
OLD_FILES+=usr/include/netinet/ip_flow.h
OLD_FILES+=usr/include/netinet/ip_fw2.h
OLD_FILES+=usr/include/netinet6/in6_prefix.h
OLD_FILES+=usr/include/netns/idp.h
OLD_FILES+=usr/include/netns/idp_var.h
OLD_FILES+=usr/include/netns/ns.h
OLD_FILES+=usr/include/netns/ns_error.h
OLD_FILES+=usr/include/netns/ns_if.h
OLD_FILES+=usr/include/netns/ns_pcb.h
OLD_FILES+=usr/include/netns/sp.h
OLD_FILES+=usr/include/netns/spidp.h
OLD_FILES+=usr/include/netns/spp_debug.h
OLD_FILES+=usr/include/netns/spp_timer.h
OLD_FILES+=usr/include/netns/spp_var.h
OLD_FILES+=usr/include/nfs/nfs.h
OLD_FILES+=usr/include/nfs/nfsm_subs.h
OLD_FILES+=usr/include/nfs/nfsmount.h
OLD_FILES+=usr/include/nfs/nfsnode.h
OLD_FILES+=usr/include/nfs/nfsrtt.h
OLD_FILES+=usr/include/nfs/nfsrvcache.h
OLD_FILES+=usr/include/nfs/nfsv2.h
OLD_FILES+=usr/include/nfs/nqnfs.h
OLD_FILES+=usr/include/ntfs/ntfs.h
OLD_FILES+=usr/include/ntfs/ntfs_compr.h
OLD_FILES+=usr/include/ntfs/ntfs_ihash.h
OLD_FILES+=usr/include/ntfs/ntfs_inode.h
OLD_FILES+=usr/include/ntfs/ntfs_subr.h
OLD_FILES+=usr/include/ntfs/ntfs_vfsops.h
OLD_FILES+=usr/include/ntfs/ntfsmount.h
OLD_FILES+=usr/include/nwfs/nwfs.h
OLD_FILES+=usr/include/nwfs/nwfs_mount.h
OLD_FILES+=usr/include/nwfs/nwfs_node.h
OLD_FILES+=usr/include/nwfs/nwfs_subr.h
OLD_FILES+=usr/include/posix4/_semaphore.h
OLD_FILES+=usr/include/posix4/aio.h
OLD_FILES+=usr/include/posix4/ksem.h
OLD_FILES+=usr/include/posix4/mqueue.h
OLD_FILES+=usr/include/posix4/posix4.h
OLD_FILES+=usr/include/posix4/sched.h
OLD_FILES+=usr/include/posix4/semaphore.h
OLD_DIRS+=usr/include/posix4
OLD_FILES+=usr/include/security/_pam_compat.h
OLD_FILES+=usr/include/security/_pam_macros.h
OLD_FILES+=usr/include/security/_pam_types.h
OLD_FILES+=usr/include/security/pam_malloc.h
OLD_FILES+=usr/include/security/pam_misc.h
OLD_FILES+=usr/include/skey.h
OLD_FILES+=usr/include/strhash.h
OLD_FILES+=usr/include/struct.h
OLD_FILES+=usr/include/sys/_label.h
OLD_FILES+=usr/include/sys/_posix.h
OLD_FILES+=usr/include/sys/bus_private.h
OLD_FILES+=usr/include/sys/ccdvar.h
OLD_FILES+=usr/include/sys/diskslice.h
OLD_FILES+=usr/include/sys/dmap.h
OLD_FILES+=usr/include/sys/inttypes.h
OLD_FILES+=usr/include/sys/jumbo.h
OLD_FILES+=usr/include/sys/mac_policy.h
OLD_FILES+=usr/include/sys/pbioio.h
OLD_FILES+=usr/include/sys/syscall-hide.h
OLD_FILES+=usr/include/sys/tprintf.h
OLD_FILES+=usr/include/sys/vnioctl.h
OLD_FILES+=usr/include/sys/wormio.h
OLD_FILES+=usr/include/telnet.h
OLD_FILES+=usr/include/ufs/mfs/mfs_extern.h
OLD_FILES+=usr/include/ufs/mfs/mfsnode.h
OLD_FILES+=usr/include/values.h
OLD_FILES+=usr/include/vm/vm_zone.h
OLD_FILES+=usr/share/examples/etc/usbd.conf
OLD_FILES+=usr/share/examples/meteor/README
OLD_FILES+=usr/share/examples/meteor/rgb16.c
OLD_FILES+=usr/share/examples/meteor/rgb24.c
OLD_FILES+=usr/share/examples/meteor/test-n.c
OLD_FILES+=usr/share/examples/meteor/yuvpk.c
OLD_FILES+=usr/share/examples/meteor/yuvpl.c
OLD_FILES+=usr/share/examples/worm/README
OLD_FILES+=usr/share/examples/worm/makecdfs.sh
OLD_FILES+=usr/share/groff_font/devlj4/Makefile
OLD_FILES+=usr/share/groff_font/devlj4/text.map
OLD_FILES+=usr/share/groff_font/devlj4/special.map
OLD_FILES+=usr/share/misc/nslookup.help
OLD_FILES+=usr/share/sendmail/cf/feature/nodns.m4
OLD_FILES+=usr/share/syscons/keymaps/lat-amer.kbd
OLD_FILES+=usr/share/vi/catalog/ru_SU.KOI8-R
OLD_FILES+=usr/share/zoneinfo/Africa/Timbuktu
OLD_FILES+=usr/share/zoneinfo/Africa/Asmera
OLD_FILES+=usr/share/zoneinfo/America/Buenos_Aires
OLD_FILES+=usr/share/zoneinfo/America/Cordoba
OLD_FILES+=usr/share/zoneinfo/America/Jujuy
OLD_FILES+=usr/share/zoneinfo/America/Catamarca
OLD_FILES+=usr/share/zoneinfo/America/Mendoza
OLD_FILES+=usr/share/zoneinfo/America/Indianapolis
OLD_FILES+=usr/share/zoneinfo/America/Louisville
OLD_FILES+=usr/share/zoneinfo/America/Argentina/ComodRivadavia
OLD_FILES+=usr/share/zoneinfo/Atlantic/Faeroe
OLD_FILES+=usr/share/zoneinfo/Europe/Belfast
OLD_FILES+=usr/share/zoneinfo/Pacific/Yap
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5EDT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6CDT
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7MDT
OLD_FILES+=usr/share/zoneinfo/SystemV/PST8PDT
OLD_FILES+=usr/share/zoneinfo/SystemV/YST9YDT
OLD_FILES+=usr/share/zoneinfo/SystemV/HST10
OLD_FILES+=usr/share/zoneinfo/SystemV/MST7
OLD_FILES+=usr/share/zoneinfo/SystemV/EST5
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4ADT
OLD_FILES+=usr/share/zoneinfo/SystemV/CST6
OLD_FILES+=usr/share/zoneinfo/SystemV/AST4
OLD_FILES+=usr/share/doc/ntp/accopt.htm
OLD_FILES+=usr/share/doc/ntp/assoc.htm
OLD_FILES+=usr/share/doc/ntp/audio.htm
OLD_FILES+=usr/share/doc/ntp/authopt.htm
OLD_FILES+=usr/share/doc/ntp/biblio.htm
OLD_FILES+=usr/share/doc/ntp/build.htm
OLD_FILES+=usr/share/doc/ntp/clockopt.htm
OLD_FILES+=usr/share/doc/ntp/config.htm
OLD_FILES+=usr/share/doc/ntp/confopt.htm
OLD_FILES+=usr/share/doc/ntp/copyright.htm
OLD_FILES+=usr/share/doc/ntp/debug.htm
OLD_FILES+=usr/share/doc/ntp/driver1.htm
OLD_FILES+=usr/share/doc/ntp/driver10.htm
OLD_FILES+=usr/share/doc/ntp/driver11.htm
OLD_FILES+=usr/share/doc/ntp/driver12.htm
OLD_FILES+=usr/share/doc/ntp/driver16.htm
OLD_FILES+=usr/share/doc/ntp/driver18.htm
OLD_FILES+=usr/share/doc/ntp/driver19.htm
OLD_FILES+=usr/share/doc/ntp/driver2.htm
OLD_FILES+=usr/share/doc/ntp/driver20.htm
OLD_FILES+=usr/share/doc/ntp/driver22.htm
OLD_FILES+=usr/share/doc/ntp/driver23.htm
OLD_FILES+=usr/share/doc/ntp/driver24.htm
OLD_FILES+=usr/share/doc/ntp/driver26.htm
OLD_FILES+=usr/share/doc/ntp/driver27.htm
OLD_FILES+=usr/share/doc/ntp/driver28.htm
OLD_FILES+=usr/share/doc/ntp/driver29.htm
OLD_FILES+=usr/share/doc/ntp/driver3.htm
OLD_FILES+=usr/share/doc/ntp/driver30.htm
OLD_FILES+=usr/share/doc/ntp/driver32.htm
OLD_FILES+=usr/share/doc/ntp/driver33.htm
OLD_FILES+=usr/share/doc/ntp/driver34.htm
OLD_FILES+=usr/share/doc/ntp/driver35.htm
OLD_FILES+=usr/share/doc/ntp/driver36.htm
OLD_FILES+=usr/share/doc/ntp/driver37.htm
OLD_FILES+=usr/share/doc/ntp/driver4.htm
OLD_FILES+=usr/share/doc/ntp/driver5.htm
OLD_FILES+=usr/share/doc/ntp/driver6.htm
OLD_FILES+=usr/share/doc/ntp/driver7.htm
OLD_FILES+=usr/share/doc/ntp/driver8.htm
OLD_FILES+=usr/share/doc/ntp/driver9.htm
OLD_FILES+=usr/share/doc/ntp/exec.htm
OLD_FILES+=usr/share/doc/ntp/extern.htm
OLD_FILES+=usr/share/doc/ntp/gadget.htm
OLD_FILES+=usr/share/doc/ntp/hints.htm
OLD_FILES+=usr/share/doc/ntp/howto.htm
OLD_FILES+=usr/share/doc/ntp/htmlprimer.htm
OLD_FILES+=usr/share/doc/ntp/index.htm
OLD_FILES+=usr/share/doc/ntp/kern.htm
OLD_FILES+=usr/share/doc/ntp/kernpps.htm
OLD_FILES+=usr/share/doc/ntp/ldisc.htm
OLD_FILES+=usr/share/doc/ntp/measure.htm
OLD_FILES+=usr/share/doc/ntp/miscopt.htm
OLD_FILES+=usr/share/doc/ntp/monopt.htm
OLD_FILES+=usr/share/doc/ntp/mx4200data.htm
OLD_FILES+=usr/share/doc/ntp/notes.htm
OLD_FILES+=usr/share/doc/ntp/ntpd.htm
OLD_FILES+=usr/share/doc/ntp/ntpdate.htm
OLD_FILES+=usr/share/doc/ntp/ntpdc.htm
OLD_FILES+=usr/share/doc/ntp/ntpq.htm
OLD_FILES+=usr/share/doc/ntp/ntptime.htm
OLD_FILES+=usr/share/doc/ntp/ntptrace.htm
OLD_FILES+=usr/share/doc/ntp/parsedata.htm
OLD_FILES+=usr/share/doc/ntp/parsenew.htm
OLD_FILES+=usr/share/doc/ntp/patches.htm
OLD_FILES+=usr/share/doc/ntp/porting.htm
OLD_FILES+=usr/share/doc/ntp/pps.htm
OLD_FILES+=usr/share/doc/ntp/prefer.htm
OLD_FILES+=usr/share/doc/ntp/qth.htm
OLD_FILES+=usr/share/doc/ntp/quick.htm
OLD_FILES+=usr/share/doc/ntp/rdebug.htm
OLD_FILES+=usr/share/doc/ntp/refclock.htm
OLD_FILES+=usr/share/doc/ntp/release.htm
OLD_FILES+=usr/share/doc/ntp/tickadj.htm
OLD_FILES+=usr/share/doc/papers/nqnfs.ascii.gz
OLD_FILES+=usr/share/doc/papers/px.ascii.gz
OLD_FILES+=usr/share/man/man3/exp10.3.gz
OLD_FILES+=usr/share/man/man3/exp10f.3.gz
OLD_FILES+=usr/share/man/man3/fpsetsticky.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_compat_des3_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_krb5_copy_ccache.3.gz
OLD_FILES+=usr/share/man/man3/mac_is_present_np.3.gz
OLD_FILES+=usr/share/man/man3/mbmb.3.gz
OLD_FILES+=usr/share/man/man3/setrunelocale.3.gz
OLD_FILES+=usr/share/man/man5/usbd.conf.5.gz
.if ${TARGET_ARCH} != "i386" && ${TARGET_ARCH} != "amd64"
OLD_FILES+=usr/share/man/man8/boot_i386.8.gz
.endif
.if ${TARGET_ARCH} != "aarch64" && ${TARGET} != "arm" && \
${TARGET_ARCH} != "powerpc" && ${TARGET_ARCH} != "powerpc64" && \
${TARGET_ARCH} != "sparc64" && ${TARGET} != "mips"
OLD_FILES+=usr/share/man/man8/ofwdump.8.gz
.endif
OLD_FILES+=usr/share/man/man8/mount_reiserfs.8.gz
OLD_FILES+=usr/share/man/man9/VFS_START.9.gz
OLD_FILES+=usr/share/man/man9/cpu_critical_exit.9.gz
OLD_FILES+=usr/share/man/man9/cpu_critical_enter.9.gz
OLD_FILES+=usr/share/info/annotate.info.gz
OLD_FILES+=usr/share/info/tar.info.gz
OLD_FILES+=usr/share/bsnmp/defs/tree.def
OLD_FILES+=usr/share/bsnmp/defs/mibII_tree.def
OLD_FILES+=usr/share/bsnmp/defs/netgraph_tree.def
OLD_FILES+=usr/share/bsnmp/mibs/FOKUS-MIB.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-MIB.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-SNMPD.txt
OLD_FILES+=usr/share/bsnmp/mibs/BEGEMOT-NETGRAPH.txt
OLD_FILES+=usr/libdata/msdosfs/iso22dos
OLD_FILES+=usr/libdata/msdosfs/iso72dos
OLD_FILES+=usr/libdata/msdosfs/koi2dos
OLD_FILES+=usr/libdata/msdosfs/koi8u2dos
# The following files are *not* obsolete, they just don't get touched at
# install, so don't add them:
# - boot/loader.rc
# - usr/share/tmac/man.local
# - usr/share/tmac/mm/locale
# - usr/share/tmac/mm/se_locale
# - var/yp/Makefile
# 20071120: shared library version bump
OLD_LIBS+=usr/lib/libasn1.so.8
OLD_LIBS+=usr/lib/libgssapi.so.8
OLD_LIBS+=usr/lib/libgssapi_krb5.so.8
OLD_LIBS+=usr/lib/libhdb.so.8
OLD_LIBS+=usr/lib/libkadm5clnt.so.8
OLD_LIBS+=usr/lib/libkadm5srv.so.8
OLD_LIBS+=usr/lib/libkafs5.so.8
OLD_LIBS+=usr/lib/libkrb5.so.8
OLD_LIBS+=usr/lib/libobjc.so.2
OLD_LIBS+=usr/lib32/libgssapi.so.8
OLD_LIBS+=usr/lib32/libobjc.so.2
# 20070519: GCC 4.2
OLD_LIBS+=usr/lib/libg2c.a
OLD_LIBS+=usr/lib/libg2c.so
OLD_LIBS+=usr/lib/libg2c.so.2
OLD_LIBS+=usr/lib/libg2c_p.a
OLD_LIBS+=usr/lib/libgcc_pic.a
OLD_LIBS+=usr/lib32/libg2c.a
OLD_LIBS+=usr/lib32/libg2c.so
OLD_LIBS+=usr/lib32/libg2c.so.2
OLD_LIBS+=usr/lib32/libg2c_p.a
OLD_LIBS+=usr/lib32/libgcc_pic.a
# 20060729: OpenSSL 0.9.7e -> 0.9.8b upgrade
OLD_LIBS+=lib/libcrypto.so.4
OLD_LIBS+=usr/lib/libssl.so.4
OLD_LIBS+=usr/lib32/libcrypto.so.4
OLD_LIBS+=usr/lib32/libssl.so.4
# 20060521: gethostbyaddr(3) ABI change
OLD_LIBS+=usr/lib/libroken.so.8
OLD_LIBS+=lib/libatm.so.3
OLD_LIBS+=lib/libc.so.6
OLD_LIBS+=lib/libutil.so.5
OLD_LIBS+=usr/lib32/libatm.so.3
OLD_LIBS+=usr/lib32/libc.so.6
OLD_LIBS+=usr/lib32/libutil.so.5
# 20060413: shared library moved to /usr/lib
OLD_LIBS+=lib/libgpib.so.1
# 20060413: libpcap.so.4 moved to /lib/
OLD_LIBS+=usr/lib/libpcap.so.4
# 20060412: libpthread.so.2 moved to /lib/
OLD_LIBS+=usr/lib/libpthread.so.2
# 20060127: revert libdisk to static-only
OLD_LIBS+=usr/lib/libdisk.so.3
# 20051027: libc_r discontinued (removed 20101113)
OLD_LIBS+=usr/lib/libc_r.a
OLD_LIBS+=usr/lib/libc_r.so
OLD_LIBS+=usr/lib/libc_r.so.7
OLD_LIBS+=usr/lib/libc_r_p.a
OLD_LIBS+=usr/lib32/libc_r.a
OLD_LIBS+=usr/lib32/libc_r.so
OLD_LIBS+=usr/lib32/libc_r.so.7
OLD_LIBS+=usr/lib32/libc_r_p.a
# 20050722: bump for 6.0-RELEASE
OLD_LIBS+=lib/libalias.so.4
OLD_LIBS+=lib/libatm.so.2
OLD_LIBS+=lib/libbegemot.so.1
OLD_LIBS+=lib/libbsdxml.so.1
OLD_LIBS+=lib/libbsnmp.so.2
OLD_LIBS+=lib/libc.so.5
OLD_LIBS+=lib/libcam.so.2
OLD_LIBS+=lib/libcrypt.so.2
OLD_LIBS+=lib/libcrypto.so.3
OLD_LIBS+=lib/libdevstat.so.4
OLD_LIBS+=lib/libedit.so.4
OLD_LIBS+=lib/libgeom.so.2
OLD_LIBS+=lib/libgpib.so.0
OLD_LIBS+=lib/libipsec.so.1
OLD_LIBS+=lib/libipx.so.2
OLD_LIBS+=lib/libkiconv.so.1
OLD_LIBS+=lib/libkvm.so.2
OLD_LIBS+=lib/libm.so.3
OLD_LIBS+=lib/libmd.so.2
OLD_LIBS+=lib/libncurses.so.5
OLD_LIBS+=lib/libreadline.so.5
OLD_LIBS+=lib/libsbuf.so.2
OLD_LIBS+=lib/libufs.so.2
OLD_LIBS+=lib/libutil.so.4
OLD_LIBS+=lib/libz.so.2
OLD_LIBS+=usr/lib/libarchive.so.1
OLD_LIBS+=usr/lib/libasn1.so.7
OLD_LIBS+=usr/lib/libbluetooth.so.1
OLD_LIBS+=usr/lib/libbz2.so.1
OLD_LIBS+=usr/lib/libc_r.so.5
OLD_LIBS+=usr/lib/libcalendar.so.2
OLD_LIBS+=usr/lib/libcom_err.so.2
OLD_LIBS+=usr/lib/libdevinfo.so.2
OLD_LIBS+=usr/lib/libdialog.so.4
OLD_LIBS+=usr/lib/libfetch.so.3
OLD_LIBS+=usr/lib/libform.so.2
OLD_LIBS+=usr/lib/libftpio.so.5
OLD_LIBS+=usr/lib/libg2c.so.1
OLD_LIBS+=usr/lib/libgnuregex.so.2
OLD_LIBS+=usr/lib/libgssapi.so.7
OLD_LIBS+=usr/lib/libhdb.so.7
OLD_LIBS+=usr/lib/libhistory.so.5
OLD_LIBS+=usr/lib/libkadm5clnt.so.7
OLD_LIBS+=usr/lib/libkadm5srv.so.7
OLD_LIBS+=usr/lib/libkafs5.so.7
OLD_LIBS+=usr/lib/libkrb5.so.7
OLD_LIBS+=usr/lib/libmagic.so.1
OLD_LIBS+=usr/lib/libmenu.so.2
OLD_LIBS+=usr/lib/libmilter.so.2
OLD_LIBS+=usr/lib/libmp.so.4
OLD_LIBS+=usr/lib/libncp.so.1
OLD_LIBS+=usr/lib/libnetgraph.so.1
OLD_LIBS+=usr/lib/libngatm.so.1
OLD_LIBS+=usr/lib/libobjc.so.1
OLD_LIBS+=usr/lib/libopie.so.3
OLD_LIBS+=usr/lib/libpam.so.2
OLD_LIBS+=usr/lib/libpanel.so.2
OLD_LIBS+=usr/lib/libpcap.so.3
OLD_LIBS+=usr/lib/libpmc.so.2
OLD_LIBS+=usr/lib/libpthread.so.1
OLD_LIBS+=usr/lib/libradius.so.1
OLD_LIBS+=usr/lib/libroken.so.7
OLD_LIBS+=usr/lib/librpcsvc.so.2
OLD_LIBS+=usr/lib/libsdp.so.1
OLD_LIBS+=usr/lib/libsmb.so.1
OLD_LIBS+=usr/lib/libssh.so.2
OLD_LIBS+=usr/lib/libssl.so.3
OLD_LIBS+=usr/lib/libstdc++.so.4
OLD_LIBS+=usr/lib/libtacplus.so.1
OLD_LIBS+=usr/lib/libthr.so.1
OLD_LIBS+=usr/lib/libthread_db.so.1
OLD_LIBS+=usr/lib/libugidfw.so.1
OLD_LIBS+=usr/lib/libusbhid.so.1
OLD_LIBS+=usr/lib/libvgl.so.3
OLD_LIBS+=usr/lib/libwrap.so.3
OLD_LIBS+=usr/lib/libypclnt.so.1
OLD_LIBS+=usr/lib/pam_chroot.so.2
OLD_LIBS+=usr/lib/pam_deny.so.2
OLD_LIBS+=usr/lib/pam_echo.so.2
OLD_LIBS+=usr/lib/pam_exec.so.2
OLD_LIBS+=usr/lib/pam_ftpusers.so.2
OLD_LIBS+=usr/lib/pam_group.so.2
OLD_LIBS+=usr/lib/pam_guest.so.2
OLD_LIBS+=usr/lib/pam_krb5.so.2
OLD_LIBS+=usr/lib/pam_ksu.so.2
OLD_LIBS+=usr/lib/pam_lastlog.so.2
OLD_LIBS+=usr/lib/pam_login_access.so.2
OLD_LIBS+=usr/lib/pam_nologin.so.2
OLD_LIBS+=usr/lib/pam_opie.so.2
OLD_LIBS+=usr/lib/pam_opieaccess.so.2
OLD_LIBS+=usr/lib/pam_passwdqc.so.2
OLD_LIBS+=usr/lib/pam_permit.so.2
OLD_LIBS+=usr/lib/pam_radius.so.2
OLD_LIBS+=usr/lib/pam_rhosts.so.2
OLD_LIBS+=usr/lib/pam_rootok.so.2
OLD_LIBS+=usr/lib/pam_securetty.so.2
OLD_LIBS+=usr/lib/pam_self.so.2
OLD_LIBS+=usr/lib/pam_ssh.so.2
OLD_LIBS+=usr/lib/pam_tacplus.so.2
OLD_LIBS+=usr/lib/pam_unix.so.2
OLD_LIBS+=usr/lib/snmp_atm.so.3
OLD_LIBS+=usr/lib/snmp_mibII.so.3
OLD_LIBS+=usr/lib/snmp_netgraph.so.3
OLD_LIBS+=usr/lib/snmp_pf.so.3
# 200505XX: ?
OLD_LIBS+=usr/lib/snmp_atm.so.2
OLD_LIBS+=usr/lib/snmp_mibII.so.2
OLD_LIBS+=usr/lib/snmp_netgraph.so.2
OLD_LIBS+=usr/lib/snmp_pf.so.2
# 2005XXXX: not ready for primetime yet
OLD_LIBS+=usr/lib/libautofs.so.1
# 200411XX: libxpg4 removal
OLD_LIBS+=usr/lib/libxpg4.so.3
# 200410XX: libm compatibility fix
OLD_LIBS+=lib/libm.so.2
# 20041001: version bump
OLD_LIBS+=lib/libreadline.so.4
OLD_LIBS+=usr/lib/libhistory.so.4
OLD_LIBS+=usr/lib/libopie.so.2
OLD_LIBS+=usr/lib/libpcap.so.2
# 20040925: bind9 import
OLD_LIBS+=usr/lib/libisc.so.1
# 200408XX
OLD_LIBS+=usr/lib/snmp_netgraph.so.1
# 200404XX
OLD_LIBS+=usr/lib/libsnmp.so.1
OLD_LIBS+=usr/lib/snmp_mibII.so.1
# 200309XX
OLD_LIBS+=usr/lib/libasn1.so.6
OLD_LIBS+=usr/lib/libhdb.so.6
OLD_LIBS+=usr/lib/libkadm5clnt.so.6
OLD_LIBS+=usr/lib/libkadm5srv.so.6
OLD_LIBS+=usr/lib/libkrb5.so.6
OLD_LIBS+=usr/lib/libroken.so.6
# 200304XX
OLD_LIBS+=usr/lib/libc.so.4
OLD_LIBS+=usr/lib/libc_r.so.4
OLD_LIBS+=usr/lib/libdevstat.so.2
OLD_LIBS+=usr/lib/libedit.so.3
OLD_LIBS+=usr/lib/libgmp.so.3
OLD_LIBS+=usr/lib/libmp.so.3
OLD_LIBS+=usr/lib/libpam.so.1
OLD_LIBS+=usr/lib/libposix1e.so.2
OLD_LIBS+=usr/lib/libskey.so.2
OLD_LIBS+=usr/lib/libusbhid.so.0
OLD_LIBS+=usr/lib/libvgl.so.2
# 20030218: OpenSSL 0.9.7 import
OLD_FILES+=usr/include/des.h
OLD_FILES+=usr/lib/libdes.a
OLD_FILES+=usr/lib/libdes.so
OLD_LIBS+=usr/lib/libdes.so.3
OLD_FILES+=usr/lib/libdes_p.a
# 200302XX
OLD_LIBS+=usr/lib/libacl.so.3
OLD_LIBS+=usr/lib/libasn1.so.5
OLD_LIBS+=usr/lib/libcrypto.so.2
OLD_LIBS+=usr/lib/libgssapi.so.5
OLD_LIBS+=usr/lib/libhdb.so.5
OLD_LIBS+=usr/lib/libkadm.so.3
OLD_LIBS+=usr/lib/libkadm5clnt.so.5
OLD_LIBS+=usr/lib/libkadm5srv.so.5
OLD_LIBS+=usr/lib/libkafs.so.3
OLD_LIBS+=usr/lib/libkafs5.so.5
OLD_LIBS+=usr/lib/libkdb.so.3
OLD_LIBS+=usr/lib/libkrb.so.3
OLD_LIBS+=usr/lib/libroken.so.
OLD_LIBS+=usr/lib/libssl.so.2
OLD_LIBS+=usr/lib/pam_kerberosIV.so
# 200208XX
OLD_LIBS+=usr/lib/libgssapi.so.4
# 200203XX
OLD_LIBS+=usr/lib/libss.so.3
OLD_LIBS+=usr/lib/libusb.so.0
# 200112XX
OLD_LIBS+=usr/lib/libfetch.so.2
# 200110XX
OLD_LIBS+=usr/lib/libgssapi.so.3
# 200104XX
OLD_LIBS+=usr/lib/libdescrypt.so.2
OLD_LIBS+=usr/lib/libscrypt.so.2
# 200102XX
OLD_LIBS+=usr/lib/libcrypto.so.1
OLD_LIBS+=usr/lib/libssl.so.1
# 200009XX
OLD_LIBS+=usr/lib/libRSAglue.so.1
OLD_LIBS+=usr/lib/librsaINTL.so.1
OLD_LIBS+=usr/lib/librsaUSA.so.1
# 200006XX
OLD_LIBS+=usr/lib/libalias.so.3
OLD_LIBS+=usr/lib/libfetch.so.1
OLD_LIBS+=usr/lib/libipsec.so.0
# 200005XX
OLD_LIBS+=usr/lib/libxpg4.so.2
# 200002XX
OLD_LIBS+=usr/lib/libc.so.3
OLD_LIBS+=usr/lib/libcurses.so.2
OLD_LIBS+=usr/lib/libdialog.so.3
OLD_LIBS+=usr/lib/libedit.so.2
OLD_LIBS+=usr/lib/libf2c.so.2
OLD_LIBS+=usr/lib/libftpio.so.4
OLD_LIBS+=usr/lib/libg++.so.4
OLD_LIBS+=usr/lib/libhistory.so.3
OLD_LIBS+=usr/lib/libmytinfo.so.2
OLD_LIBS+=usr/lib/libncurses.so.3
OLD_LIBS+=usr/lib/libreadline.so.3
OLD_LIBS+=usr/lib/libss.so.2
OLD_LIBS+=usr/lib/libtermcap.so.2
OLD_LIBS+=usr/lib/libutil.so.2
OLD_LIBS+=usr/lib/libvgl.so.1
OLD_LIBS+=usr/lib/libwrap.so.2
# 19991216
OLD_FILES+=usr/sbin/xntpdc
# 199909XX
OLD_LIBS+=usr/lib/libc_r.so.3
# ???
OLD_LIBS+=usr/lib/libarchive.so.2
OLD_LIBS+=usr/lib/libbsnmp.so.1
OLD_LIBS+=usr/lib/libc_r.so.6
OLD_LIBS+=usr/lib32/libarchive.so.2
OLD_LIBS+=usr/lib32/libc_r.so.6
OLD_LIBS+=usr/lib/libcipher.so.2
OLD_LIBS+=usr/lib/libgssapi.so.6
OLD_LIBS+=usr/lib/libkse.so.1
OLD_LIBS+=usr/lib/liblwres.so.3
OLD_LIBS+=usr/lib/pam_ftp.so.2
# 20131013: Removal of the ATF tools
OLD_DIRS+=etc/atf
OLD_DIRS+=usr/share/examples/atf
OLD_DIRS+=usr/share/xml/atf
OLD_DIRS+=usr/share/xml
OLD_DIRS+=usr/share/xsl/atf
OLD_DIRS+=usr/share/xsl
# 20040925: bind9 import
OLD_DIRS+=usr/share/doc/bind/html
OLD_DIRS+=usr/share/doc/bind/misc
OLD_DIRS+=usr/share/doc/bind/
# ???
OLD_DIRS+=usr/include/g++/std
OLD_DIRS+=usr/include/msdosfs
OLD_DIRS+=usr/include/ntfs
OLD_DIRS+=usr/include/nwfs
OLD_DIRS+=usr/include/ufs/mfs
# 20011001: UUCP migration to ports
OLD_DIRS+=usr/libexec/uucp
.include "tools/build/mk/OptionalObsoleteFiles.inc"
Index: projects/clang700-import/contrib/compiler-rt
===================================================================
--- projects/clang700-import/contrib/compiler-rt (revision 340124)
+++ projects/clang700-import/contrib/compiler-rt (revision 340125)
Property changes on: projects/clang700-import/contrib/compiler-rt
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /vendor/compiler-rt/dist-release_70:r338730-340124
Index: projects/clang700-import/contrib/libc++
===================================================================
--- projects/clang700-import/contrib/libc++ (revision 340124)
+++ projects/clang700-import/contrib/libc++ (revision 340125)
Property changes on: projects/clang700-import/contrib/libc++
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /vendor/libc++/dist-release_70:r338730-340124
Index: projects/clang700-import/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
===================================================================
--- projects/clang700-import/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h (revision 340124)
+++ projects/clang700-import/contrib/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h (revision 340125)
@@ -1,164 +1,165 @@
//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file exposes an interface to build some C language libcalls for
// optimization passes that need to call the various functions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
#define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IRBuilder.h"
namespace llvm {
class Value;
class DataLayout;
class TargetLibraryInfo;
/// Analyze the name and prototype of the given function and set any
/// applicable attributes.
/// If the library function is unavailable, this doesn't modify it.
///
/// Returns true if any attributes were set and false otherwise.
bool inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI);
+ bool inferLibFuncAttributes(Module *M, StringRef Name, const TargetLibraryInfo &TLI);
/// Check whether the overloaded unary floating point function
/// corresponding to \a Ty is available.
bool hasUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
LibFunc DoubleFn, LibFunc FloatFn,
LibFunc LongDoubleFn);
/// Return V if it is an i8*, otherwise cast it to i8*.
Value *castToCStr(Value *V, IRBuilder<> &B);
/// Emit a call to the strlen function to the builder, for the specified
/// pointer. Ptr is required to be some pointer type, and the return value has
/// 'intptr_t' type.
Value *emitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
/// Emit a call to the strnlen function to the builder, for the specified
/// pointer. Ptr is required to be some pointer type, MaxLen must be of size_t
/// type, and the return value has 'intptr_t' type.
Value *emitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// Emit a call to the strchr function to the builder, for the specified
/// pointer and character. Ptr is required to be some pointer type, and the
/// return value has 'i8*' type.
Value *emitStrChr(Value *Ptr, char C, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// Emit a call to the strncmp function to the builder.
Value *emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// Emit a call to the strcpy function to the builder, for the specified
/// pointer arguments.
Value *emitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
const TargetLibraryInfo *TLI, StringRef Name = "strcpy");
/// Emit a call to the strncpy function to the builder, for the specified
/// pointer arguments and length.
Value *emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
const TargetLibraryInfo *TLI, StringRef Name = "strncpy");
/// Emit a call to the __memcpy_chk function to the builder. This expects that
/// Len and ObjSize have type 'intptr_t' and that Dst/Src are pointers.
Value *emitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
/// Emit a call to the memchr function. This assumes that Ptr is a pointer,
/// Val is an i32 value, and Len is an 'intptr_t' value.
Value *emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// Emit a call to the memcmp function.
Value *emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// Emit a call to the unary function named 'Name' (e.g. 'floor'). This
/// function is known to take a single value of type matching 'Op' and to
/// return one value with the same type. If 'Op' is a long double, an 'l'
/// suffix is added to the name; if 'Op' is a float, an 'f' suffix is added.
Value *emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
const AttributeList &Attrs);
/// Emit a call to the binary function named 'Name' (e.g. 'fmin'). This
/// function is known to take values of type matching 'Op1' and 'Op2' and to
/// return one value with the same type. If 'Op1'/'Op2' are long double, an
/// 'l' suffix is added to the name; if they are float, an 'f' suffix is added.
Value *emitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
IRBuilder<> &B, const AttributeList &Attrs);
/// Emit a call to the putchar function. This assumes that Char is an integer.
Value *emitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
/// Emit a call to the puts function. This assumes that Str is some pointer.
Value *emitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);
/// Emit a call to the fputc function. This assumes that Char is an i32, and
/// File is a pointer to FILE.
Value *emitFPutC(Value *Char, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// Emit a call to the fputc_unlocked function. This assumes that Char is an
/// i32, and File is a pointer to FILE.
Value *emitFPutCUnlocked(Value *Char, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// Emit a call to the fputs function. Str is required to be a pointer and
/// File is a pointer to FILE.
Value *emitFPutS(Value *Str, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// Emit a call to the fputs_unlocked function. Str is required to be a
/// pointer and File is a pointer to FILE.
Value *emitFPutSUnlocked(Value *Str, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// Emit a call to the fwrite function. This assumes that Ptr is a pointer,
/// Size is an 'intptr_t', and File is a pointer to FILE.
Value *emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI);
/// Emit a call to the malloc function.
Value *emitMalloc(Value *Num, IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
/// Emit a call to the calloc function.
Value *emitCalloc(Value *Num, Value *Size, const AttributeList &Attrs,
IRBuilder<> &B, const TargetLibraryInfo &TLI);
/// Emit a call to the fwrite_unlocked function. This assumes that Ptr is a
/// pointer, Size is an 'intptr_t', N is nmemb and File is a pointer to FILE.
Value *emitFWriteUnlocked(Value *Ptr, Value *Size, Value *N, Value *File,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
/// Emit a call to the fgetc_unlocked function. File is a pointer to FILE.
Value *emitFGetCUnlocked(Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// Emit a call to the fgets_unlocked function. Str is required to be a
/// pointer, Size is an i32 and File is a pointer to FILE.
Value *emitFGetSUnlocked(Value *Str, Value *Size, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);
/// Emit a call to the fread_unlocked function. This assumes that Ptr is a
/// pointer, Size is an 'intptr_t', N is nmemb and File is a pointer to FILE.
Value *emitFReadUnlocked(Value *Ptr, Value *Size, Value *N, Value *File,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);
}
#endif
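As a quick orientation to the emit* helpers declared above, here is a minimal, hypothetical sketch of how an optimization pass might use emitStrLen; the wrapper emitNonEmptyCheck and its surrounding setup are illustrative assumptions, only the emitStrLen signature comes from this header.
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;
// Hypothetical helper: lower an "is this C string non-empty?" test to a
// strlen call plus an integer compare. B must already be positioned at the
// point where the replacement code should be emitted.
static Value *emitNonEmptyCheck(Value *Str, IRBuilder<> &B,
                                const DataLayout &DL,
                                const TargetLibraryInfo *TLI) {
  // emitStrLen returns null when strlen is unavailable on the target, so the
  // caller must be prepared to keep the original code.
  Value *Len = emitStrLen(Str, B, DL, TLI);
  if (!Len)
    return nullptr;
  return B.CreateICmpNE(Len, ConstantInt::get(Len->getType(), 0));
}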
Index: projects/clang700-import/contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/Analysis/ModuleSummaryAnalysis.cpp (revision 340125)
@@ -1,630 +1,643 @@
//===- ModuleSummaryAnalysis.cpp - Module summary index builder -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass builds a ModuleSummaryIndex object for the module, to be written
// to bitcode or LLVM assembly.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/IndirectCallPromotionAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TypeMetadataUtils.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "module-summary-analysis"
// Option to force edges cold, which will block importing when the
// -import-cold-multiplier is set to 0. Useful for debugging.
FunctionSummary::ForceSummaryHotnessType ForceSummaryEdgesCold =
FunctionSummary::FSHT_None;
cl::opt<FunctionSummary::ForceSummaryHotnessType, true> FSEC(
"force-summary-edges-cold", cl::Hidden, cl::location(ForceSummaryEdgesCold),
cl::desc("Force all edges in the function summary to cold"),
cl::values(clEnumValN(FunctionSummary::FSHT_None, "none", "None."),
clEnumValN(FunctionSummary::FSHT_AllNonCritical,
"all-non-critical", "All non-critical edges."),
clEnumValN(FunctionSummary::FSHT_All, "all", "All edges.")));
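// Hypothetical invocation (for illustration only): the flag takes effect
// wherever summaries are built, e.g. when producing ThinLTO bitcode:
//   opt -module-summary -force-summary-edges-cold=all-non-critical in.ll -o out.bc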
// Walk through the operands of a given User via worklist iteration and populate
// the set of GlobalValue references encountered. Invoked either on an
// Instruction or a GlobalVariable (which walks its initializer).
-static void findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
+// Return true if any of the operands contains a blockaddress. This matters
+// when computing the summary for a global variable: if the global references
+// a basic block address, it can't be imported separately from the function
+// containing that block, so for simplicity we currently don't import such
+// globals at all. When importing a function we don't care whether any of its
+// instructions takes the address of a basic block, because an instruction can
+// only take the address of a basic block in its own function.
+static bool findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
SetVector<ValueInfo> &RefEdges,
SmallPtrSet<const User *, 8> &Visited) {
+ bool HasBlockAddress = false;
SmallVector<const User *, 32> Worklist;
Worklist.push_back(CurUser);
while (!Worklist.empty()) {
const User *U = Worklist.pop_back_val();
if (!Visited.insert(U).second)
continue;
ImmutableCallSite CS(U);
for (const auto &OI : U->operands()) {
const User *Operand = dyn_cast<User>(OI);
if (!Operand)
continue;
- if (isa<BlockAddress>(Operand))
+ if (isa<BlockAddress>(Operand)) {
+ HasBlockAddress = true;
continue;
+ }
if (auto *GV = dyn_cast<GlobalValue>(Operand)) {
// We have a reference to a global value. This should be added to
// the reference set unless it is a callee. Callees are handled
// specially by WriteFunction and are added to a separate list.
if (!(CS && CS.isCallee(&OI)))
RefEdges.insert(Index.getOrInsertValueInfo(GV));
continue;
}
Worklist.push_back(Operand);
}
}
+ return HasBlockAddress;
}
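// Illustrative (hypothetical) IR: a global whose initializer embeds a
// blockaddress, e.g.
//   @tbl = internal constant [1 x i8*] [i8* blockaddress(@f, %bb)]
// makes findRefEdges return true; such a global is tied to @f's block layout
// and therefore must not be imported on its own.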
static CalleeInfo::HotnessType getHotness(uint64_t ProfileCount,
ProfileSummaryInfo *PSI) {
if (!PSI)
return CalleeInfo::HotnessType::Unknown;
if (PSI->isHotCount(ProfileCount))
return CalleeInfo::HotnessType::Hot;
if (PSI->isColdCount(ProfileCount))
return CalleeInfo::HotnessType::Cold;
return CalleeInfo::HotnessType::None;
}
static bool isNonRenamableLocal(const GlobalValue &GV) {
return GV.hasSection() && GV.hasLocalLinkage();
}
/// Determine whether this call has all constant integer arguments (excluding
/// "this") and summarize it to VCalls or ConstVCalls as appropriate.
static void addVCallToSet(DevirtCallSite Call, GlobalValue::GUID Guid,
SetVector<FunctionSummary::VFuncId> &VCalls,
SetVector<FunctionSummary::ConstVCall> &ConstVCalls) {
std::vector<uint64_t> Args;
// Start from the second argument to skip the "this" pointer.
for (auto &Arg : make_range(Call.CS.arg_begin() + 1, Call.CS.arg_end())) {
auto *CI = dyn_cast<ConstantInt>(Arg);
if (!CI || CI->getBitWidth() > 64) {
VCalls.insert({Guid, Call.Offset});
return;
}
Args.push_back(CI->getZExtValue());
}
ConstVCalls.insert({{Guid, Call.Offset}, std::move(Args)});
}
/// If this intrinsic call requires that we add information to the function
/// summary, do so via the non-constant reference arguments.
static void addIntrinsicToSummary(
const CallInst *CI, SetVector<GlobalValue::GUID> &TypeTests,
SetVector<FunctionSummary::VFuncId> &TypeTestAssumeVCalls,
SetVector<FunctionSummary::VFuncId> &TypeCheckedLoadVCalls,
SetVector<FunctionSummary::ConstVCall> &TypeTestAssumeConstVCalls,
SetVector<FunctionSummary::ConstVCall> &TypeCheckedLoadConstVCalls) {
switch (CI->getCalledFunction()->getIntrinsicID()) {
case Intrinsic::type_test: {
auto *TypeMDVal = cast<MetadataAsValue>(CI->getArgOperand(1));
auto *TypeId = dyn_cast<MDString>(TypeMDVal->getMetadata());
if (!TypeId)
break;
GlobalValue::GUID Guid = GlobalValue::getGUID(TypeId->getString());
// Produce a summary from type.test intrinsics. We only summarize type.test
// intrinsics that have uses other than an llvm.assume intrinsic.
// Intrinsics whose only uses are assumes are relevant only to the
// devirtualization pass, not the type test lowering pass.
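// For illustration (hypothetical IR), this pattern has only an assume use
// and therefore adds no TypeTests entry:
//   %p = call i1 @llvm.type.test(i8* %ptr, metadata !"_ZTS1A")
//   call void @llvm.assume(i1 %p)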
bool HasNonAssumeUses = llvm::any_of(CI->uses(), [](const Use &CIU) {
auto *AssumeCI = dyn_cast<CallInst>(CIU.getUser());
if (!AssumeCI)
return true;
Function *F = AssumeCI->getCalledFunction();
return !F || F->getIntrinsicID() != Intrinsic::assume;
});
if (HasNonAssumeUses)
TypeTests.insert(Guid);
SmallVector<DevirtCallSite, 4> DevirtCalls;
SmallVector<CallInst *, 4> Assumes;
findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI);
for (auto &Call : DevirtCalls)
addVCallToSet(Call, Guid, TypeTestAssumeVCalls,
TypeTestAssumeConstVCalls);
break;
}
case Intrinsic::type_checked_load: {
auto *TypeMDVal = cast<MetadataAsValue>(CI->getArgOperand(2));
auto *TypeId = dyn_cast<MDString>(TypeMDVal->getMetadata());
if (!TypeId)
break;
GlobalValue::GUID Guid = GlobalValue::getGUID(TypeId->getString());
SmallVector<DevirtCallSite, 4> DevirtCalls;
SmallVector<Instruction *, 4> LoadedPtrs;
SmallVector<Instruction *, 4> Preds;
bool HasNonCallUses = false;
findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds,
HasNonCallUses, CI);
// Any non-call uses of the result of llvm.type.checked.load will
// prevent us from optimizing away the llvm.type.test.
if (HasNonCallUses)
TypeTests.insert(Guid);
for (auto &Call : DevirtCalls)
addVCallToSet(Call, Guid, TypeCheckedLoadVCalls,
TypeCheckedLoadConstVCalls);
break;
}
default:
break;
}
}
static void
computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
const Function &F, BlockFrequencyInfo *BFI,
ProfileSummaryInfo *PSI, bool HasLocalsInUsedOrAsm,
DenseSet<GlobalValue::GUID> &CantBePromoted) {
// Summaries are not currently supported for anonymous functions; they
// should have been named by this point.
assert(F.hasName());
unsigned NumInsts = 0;
// Map from callee ValueId to profile count. Used to accumulate profile
// counts for all static calls to a given callee.
MapVector<ValueInfo, CalleeInfo> CallGraphEdges;
SetVector<ValueInfo> RefEdges;
SetVector<GlobalValue::GUID> TypeTests;
SetVector<FunctionSummary::VFuncId> TypeTestAssumeVCalls,
TypeCheckedLoadVCalls;
SetVector<FunctionSummary::ConstVCall> TypeTestAssumeConstVCalls,
TypeCheckedLoadConstVCalls;
ICallPromotionAnalysis ICallAnalysis;
SmallPtrSet<const User *, 8> Visited;
// Add personality function, prefix data and prologue data to function's ref
// list.
findRefEdges(Index, &F, RefEdges, Visited);
bool HasInlineAsmMaybeReferencingInternal = false;
for (const BasicBlock &BB : F)
for (const Instruction &I : BB) {
if (isa<DbgInfoIntrinsic>(I))
continue;
++NumInsts;
findRefEdges(Index, &I, RefEdges, Visited);
auto CS = ImmutableCallSite(&I);
if (!CS)
continue;
const auto *CI = dyn_cast<CallInst>(&I);
// Since we don't know exactly which local values are referenced in inline
// assembly, conservatively mark the function as possibly referencing
// a local value from inline assembly to ensure we don't export a
// reference (which would require renaming and promotion of the
// referenced value).
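// For illustration (hypothetical IR), an inline asm call such as
//   call void asm sideeffect ".quad internal_sym", ""()
// may name an internal symbol that this analysis cannot see through.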
if (HasLocalsInUsedOrAsm && CI && CI->isInlineAsm())
HasInlineAsmMaybeReferencingInternal = true;
auto *CalledValue = CS.getCalledValue();
auto *CalledFunction = CS.getCalledFunction();
if (CalledValue && !CalledFunction) {
CalledValue = CalledValue->stripPointerCastsNoFollowAliases();
// Stripping pointer casts can reveal a called function.
CalledFunction = dyn_cast<Function>(CalledValue);
}
// Check if this is an alias to a function. If so, get the
// called aliasee for the checks below.
if (auto *GA = dyn_cast<GlobalAlias>(CalledValue)) {
assert(!CalledFunction && "Expected null called function in callsite for alias");
CalledFunction = dyn_cast<Function>(GA->getBaseObject());
}
// Check if this is a direct call to a known function or a known
// intrinsic, or an indirect call with profile data.
if (CalledFunction) {
if (CI && CalledFunction->isIntrinsic()) {
addIntrinsicToSummary(
CI, TypeTests, TypeTestAssumeVCalls, TypeCheckedLoadVCalls,
TypeTestAssumeConstVCalls, TypeCheckedLoadConstVCalls);
continue;
}
// We should have named any anonymous globals.
assert(CalledFunction->hasName());
auto ScaledCount = PSI->getProfileCount(&I, BFI);
auto Hotness = ScaledCount ? getHotness(ScaledCount.getValue(), PSI)
: CalleeInfo::HotnessType::Unknown;
if (ForceSummaryEdgesCold != FunctionSummary::FSHT_None)
Hotness = CalleeInfo::HotnessType::Cold;
// Use the original CalledValue, in case it was an alias. We want
// to record the call edge to the alias in that case. Eventually
// an alias summary will be created to associate the alias and
// aliasee.
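// For illustration (hypothetical IR): given "@a = alias void (), void ()* @f",
// a call to @a produces an edge keyed on @a rather than directly on @f.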
auto &ValueInfo = CallGraphEdges[Index.getOrInsertValueInfo(
cast<GlobalValue>(CalledValue))];
ValueInfo.updateHotness(Hotness);
// Add the relative block frequency to CalleeInfo if there is no profile
// information.
if (BFI != nullptr && Hotness == CalleeInfo::HotnessType::Unknown) {
uint64_t BBFreq = BFI->getBlockFreq(&BB).getFrequency();
uint64_t EntryFreq = BFI->getEntryFreq();
ValueInfo.updateRelBlockFreq(BBFreq, EntryFreq);
}
} else {
// Skip inline assembly calls.
if (CI && CI->isInlineAsm())
continue;
// Skip direct calls.
if (!CalledValue || isa<Constant>(CalledValue))
continue;
// Check if the instruction has a callees metadata. If so, add callees
// to CallGraphEdges to reflect the references from the metadata, and
// to enable importing for subsequent indirect call promotion and
// inlining.
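// For illustration (hypothetical IR):
//   call void %fp(), !callees !0
//   !0 = !{void ()* @f, void ()* @g}
// adds edges to @f and @g even though the call itself is indirect.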
if (auto *MD = I.getMetadata(LLVMContext::MD_callees)) {
for (auto &Op : MD->operands()) {
Function *Callee = mdconst::extract_or_null<Function>(Op);
if (Callee)
CallGraphEdges[Index.getOrInsertValueInfo(Callee)];
}
}
uint32_t NumVals, NumCandidates;
uint64_t TotalCount;
auto CandidateProfileData =
ICallAnalysis.getPromotionCandidatesForInstruction(
&I, NumVals, TotalCount, NumCandidates);
for (auto &Candidate : CandidateProfileData)
CallGraphEdges[Index.getOrInsertValueInfo(Candidate.Value)]
.updateHotness(getHotness(Candidate.Count, PSI));
}
}
// Explicitly add hot edges to enforce importing for designated GUIDs for
// sample PGO, to enable the same inlines as the profiled optimized binary.
for (auto &I : F.getImportGUIDs())
CallGraphEdges[Index.getOrInsertValueInfo(I)].updateHotness(
ForceSummaryEdgesCold == FunctionSummary::FSHT_All
? CalleeInfo::HotnessType::Cold
: CalleeInfo::HotnessType::Critical);
bool NonRenamableLocal = isNonRenamableLocal(F);
bool NotEligibleForImport =
NonRenamableLocal || HasInlineAsmMaybeReferencingInternal ||
// Inliner doesn't handle variadic functions.
// FIXME: refactor this to use the same code that inliner is using.
F.isVarArg() ||
// Don't try to import functions with noinline attribute.
F.getAttributes().hasFnAttribute(Attribute::NoInline);
GlobalValueSummary::GVFlags Flags(F.getLinkage(), NotEligibleForImport,
/* Live = */ false, F.isDSOLocal());
FunctionSummary::FFlags FunFlags{
F.hasFnAttribute(Attribute::ReadNone),
F.hasFnAttribute(Attribute::ReadOnly),
F.hasFnAttribute(Attribute::NoRecurse),
F.returnDoesNotAlias(),
};
auto FuncSummary = llvm::make_unique<FunctionSummary>(
Flags, NumInsts, FunFlags, RefEdges.takeVector(),
CallGraphEdges.takeVector(), TypeTests.takeVector(),
TypeTestAssumeVCalls.takeVector(), TypeCheckedLoadVCalls.takeVector(),
TypeTestAssumeConstVCalls.takeVector(),
TypeCheckedLoadConstVCalls.takeVector());
if (NonRenamableLocal)
CantBePromoted.insert(F.getGUID());
Index.addGlobalValueSummary(F, std::move(FuncSummary));
}
static void
computeVariableSummary(ModuleSummaryIndex &Index, const GlobalVariable &V,
DenseSet<GlobalValue::GUID> &CantBePromoted) {
SetVector<ValueInfo> RefEdges;
SmallPtrSet<const User *, 8> Visited;
- findRefEdges(Index, &V, RefEdges, Visited);
+ bool HasBlockAddress = findRefEdges(Index, &V, RefEdges, Visited);
bool NonRenamableLocal = isNonRenamableLocal(V);
GlobalValueSummary::GVFlags Flags(V.getLinkage(), NonRenamableLocal,
/* Live = */ false, V.isDSOLocal());
auto GVarSummary =
llvm::make_unique<GlobalVarSummary>(Flags, RefEdges.takeVector());
if (NonRenamableLocal)
CantBePromoted.insert(V.getGUID());
+ if (HasBlockAddress)
+ GVarSummary->setNotEligibleToImport();
Index.addGlobalValueSummary(V, std::move(GVarSummary));
}
static void
computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
DenseSet<GlobalValue::GUID> &CantBePromoted) {
bool NonRenamableLocal = isNonRenamableLocal(A);
GlobalValueSummary::GVFlags Flags(A.getLinkage(), NonRenamableLocal,
/* Live = */ false, A.isDSOLocal());
auto AS = llvm::make_unique<AliasSummary>(Flags);
auto *Aliasee = A.getBaseObject();
auto *AliaseeSummary = Index.getGlobalValueSummary(*Aliasee);
assert(AliaseeSummary && "Alias expects aliasee summary to be parsed");
AS->setAliasee(AliaseeSummary);
if (NonRenamableLocal)
CantBePromoted.insert(A.getGUID());
Index.addGlobalValueSummary(A, std::move(AS));
}
// Set LiveRoot flag on entries matching the given value name.
static void setLiveRoot(ModuleSummaryIndex &Index, StringRef Name) {
if (ValueInfo VI = Index.getValueInfo(GlobalValue::getGUID(Name)))
for (auto &Summary : VI.getSummaryList())
Summary->setLive(true);
}
ModuleSummaryIndex llvm::buildModuleSummaryIndex(
const Module &M,
std::function<BlockFrequencyInfo *(const Function &F)> GetBFICallback,
ProfileSummaryInfo *PSI) {
assert(PSI);
ModuleSummaryIndex Index(/*HaveGVs=*/true);
// Identify the local values in the llvm.used and llvm.compiler.used sets,
// which should not be exported as they would then require renaming and
// promotion, but we may have opaque uses e.g. in inline asm. We collect them
// here because we use this information to mark functions containing inline
// assembly calls as not importable.
SmallPtrSet<GlobalValue *, 8> LocalsUsed;
SmallPtrSet<GlobalValue *, 8> Used;
// First collect those in the llvm.used set.
collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ false);
// Next collect those in the llvm.compiler.used set.
collectUsedGlobalVariables(M, Used, /*CompilerUsed*/ true);
DenseSet<GlobalValue::GUID> CantBePromoted;
for (auto *V : Used) {
if (V->hasLocalLinkage()) {
LocalsUsed.insert(V);
CantBePromoted.insert(V->getGUID());
}
}
bool HasLocalInlineAsmSymbol = false;
if (!M.getModuleInlineAsm().empty()) {
// Collect the local values defined by module level asm, and set up
// summaries for these symbols so that they can be marked as NoRename,
// to prevent export of any use of them in regular IR that would require
// renaming within the module level asm. Note we don't need to create a
// summary for weak or global defs, as they don't need to be flagged as
// NoRename, and defs in module level asm can't be imported anyway.
// Also, any values used but not defined within module level asm should
// be listed on the llvm.used or llvm.compiler.used global and marked as
// referenced from there.
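// For illustration (hypothetical): module-level asm such as
//   module asm ".local helper; helper: .quad 0"
// defines a local symbol "helper" that must not be renamed by promotion.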
ModuleSymbolTable::CollectAsmSymbols(
M, [&](StringRef Name, object::BasicSymbolRef::Flags Flags) {
// Symbols not marked as Weak or Global are local definitions.
if (Flags & (object::BasicSymbolRef::SF_Weak |
object::BasicSymbolRef::SF_Global))
return;
HasLocalInlineAsmSymbol = true;
GlobalValue *GV = M.getNamedValue(Name);
if (!GV)
return;
assert(GV->isDeclaration() && "Def in module asm already has definition");
GlobalValueSummary::GVFlags GVFlags(GlobalValue::InternalLinkage,
/* NotEligibleToImport = */ true,
/* Live = */ true,
/* Local */ GV->isDSOLocal());
CantBePromoted.insert(GV->getGUID());
// Create the appropriate summary type.
if (Function *F = dyn_cast<Function>(GV)) {
std::unique_ptr<FunctionSummary> Summary =
llvm::make_unique<FunctionSummary>(
GVFlags, 0,
FunctionSummary::FFlags{
F->hasFnAttribute(Attribute::ReadNone),
F->hasFnAttribute(Attribute::ReadOnly),
F->hasFnAttribute(Attribute::NoRecurse),
F->returnDoesNotAlias()},
ArrayRef<ValueInfo>{}, ArrayRef<FunctionSummary::EdgeTy>{},
ArrayRef<GlobalValue::GUID>{},
ArrayRef<FunctionSummary::VFuncId>{},
ArrayRef<FunctionSummary::VFuncId>{},
ArrayRef<FunctionSummary::ConstVCall>{},
ArrayRef<FunctionSummary::ConstVCall>{});
Index.addGlobalValueSummary(*GV, std::move(Summary));
} else {
std::unique_ptr<GlobalVarSummary> Summary =
llvm::make_unique<GlobalVarSummary>(GVFlags,
ArrayRef<ValueInfo>{});
Index.addGlobalValueSummary(*GV, std::move(Summary));
}
});
}
// Compute summaries for all functions defined in module, and save in the
// index.
for (auto &F : M) {
if (F.isDeclaration())
continue;
BlockFrequencyInfo *BFI = nullptr;
std::unique_ptr<BlockFrequencyInfo> BFIPtr;
if (GetBFICallback)
BFI = GetBFICallback(F);
else if (F.hasProfileData()) {
LoopInfo LI{DominatorTree(const_cast<Function &>(F))};
BranchProbabilityInfo BPI{F, LI};
BFIPtr = llvm::make_unique<BlockFrequencyInfo>(F, BPI, LI);
BFI = BFIPtr.get();
}
computeFunctionSummary(Index, M, F, BFI, PSI,
!LocalsUsed.empty() || HasLocalInlineAsmSymbol,
CantBePromoted);
}
// Compute summaries for all variables defined in module, and save in the
// index.
for (const GlobalVariable &G : M.globals()) {
if (G.isDeclaration())
continue;
computeVariableSummary(Index, G, CantBePromoted);
}
// Compute summaries for all aliases defined in module, and save in the
// index.
for (const GlobalAlias &A : M.aliases())
computeAliasSummary(Index, A, CantBePromoted);
for (auto *V : LocalsUsed) {
auto *Summary = Index.getGlobalValueSummary(*V);
assert(Summary && "Missing summary for global value");
Summary->setNotEligibleToImport();
}
// The linker doesn't know about these LLVM-produced values, so we need
// to flag them as live in the index to ensure index-based dead value
// analysis treats them as live roots of the analysis.
setLiveRoot(Index, "llvm.used");
setLiveRoot(Index, "llvm.compiler.used");
setLiveRoot(Index, "llvm.global_ctors");
setLiveRoot(Index, "llvm.global_dtors");
setLiveRoot(Index, "llvm.global.annotations");
bool IsThinLTO = true;
if (auto *MD =
mdconst::extract_or_null<ConstantInt>(M.getModuleFlag("ThinLTO")))
IsThinLTO = MD->getZExtValue();
for (auto &GlobalList : Index) {
// Ignore entries for references that are undefined in the current module.
if (GlobalList.second.SummaryList.empty())
continue;
assert(GlobalList.second.SummaryList.size() == 1 &&
"Expected module's index to have one summary per GUID");
auto &Summary = GlobalList.second.SummaryList[0];
if (!IsThinLTO) {
Summary->setNotEligibleToImport();
continue;
}
bool AllRefsCanBeExternallyReferenced =
llvm::all_of(Summary->refs(), [&](const ValueInfo &VI) {
return !CantBePromoted.count(VI.getGUID());
});
if (!AllRefsCanBeExternallyReferenced) {
Summary->setNotEligibleToImport();
continue;
}
if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
bool AllCallsCanBeExternallyReferenced = llvm::all_of(
FuncSummary->calls(), [&](const FunctionSummary::EdgeTy &Edge) {
return !CantBePromoted.count(Edge.first.getGUID());
});
if (!AllCallsCanBeExternallyReferenced)
Summary->setNotEligibleToImport();
}
}
return Index;
}
AnalysisKey ModuleSummaryIndexAnalysis::Key;
ModuleSummaryIndex
ModuleSummaryIndexAnalysis::run(Module &M, ModuleAnalysisManager &AM) {
ProfileSummaryInfo &PSI = AM.getResult<ProfileSummaryAnalysis>(M);
auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
return buildModuleSummaryIndex(
M,
[&FAM](const Function &F) {
return &FAM.getResult<BlockFrequencyAnalysis>(
*const_cast<Function *>(&F));
},
&PSI);
}
char ModuleSummaryIndexWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(ModuleSummaryIndexWrapperPass, "module-summary-analysis",
"Module Summary Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(ModuleSummaryIndexWrapperPass, "module-summary-analysis",
"Module Summary Analysis", false, true)
ModulePass *llvm::createModuleSummaryIndexWrapperPass() {
return new ModuleSummaryIndexWrapperPass();
}
ModuleSummaryIndexWrapperPass::ModuleSummaryIndexWrapperPass()
: ModulePass(ID) {
initializeModuleSummaryIndexWrapperPassPass(*PassRegistry::getPassRegistry());
}
bool ModuleSummaryIndexWrapperPass::runOnModule(Module &M) {
auto &PSI = *getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
Index.emplace(buildModuleSummaryIndex(
M,
[this](const Function &F) {
return &(this->getAnalysis<BlockFrequencyInfoWrapperPass>(
*const_cast<Function *>(&F))
.getBFI());
},
&PSI));
return false;
}
bool ModuleSummaryIndexWrapperPass::doFinalization(Module &M) {
Index.reset();
return false;
}
void ModuleSummaryIndexWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<BlockFrequencyInfoWrapperPass>();
AU.addRequired<ProfileSummaryInfoWrapperPass>();
}
Index: projects/clang700-import/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/CodeGen/AsmPrinter/DwarfDebug.cpp (revision 340125)
@@ -1,2454 +1,2454 @@
//===- llvm/CodeGen/DwarfDebug.cpp - Dwarf Debug Framework ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//
#include "DwarfDebug.h"
#include "ByteStreamer.h"
#include "DIEHash.h"
#include "DebugLocEntry.h"
#include "DebugLocStream.h"
#include "DwarfCompileUnit.h"
#include "DwarfExpression.h"
#include "DwarfFile.h"
#include "DwarfUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/AccelTable.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/CodeGen/DIE.h"
#include "llvm/CodeGen/LexicalScopes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MachineLocation.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "dwarfdebug"
static cl::opt<bool>
DisableDebugInfoPrinting("disable-debug-info-print", cl::Hidden,
cl::desc("Disable debug info printing"));
static cl::opt<bool> UseDwarfRangesBaseAddressSpecifier(
"use-dwarf-ranges-base-address-specifier", cl::Hidden,
cl::desc("Use base address specifiers in debug_ranges"), cl::init(false));
static cl::opt<bool> GenerateARangeSection("generate-arange-section",
cl::Hidden,
cl::desc("Generate dwarf aranges"),
cl::init(false));
static cl::opt<bool>
GenerateDwarfTypeUnits("generate-type-units", cl::Hidden,
cl::desc("Generate DWARF4 type units."),
cl::init(false));
static cl::opt<bool> SplitDwarfCrossCuReferences(
"split-dwarf-cross-cu-references", cl::Hidden,
cl::desc("Enable cross-cu references in DWO files"), cl::init(false));
enum DefaultOnOff { Default, Enable, Disable };
static cl::opt<DefaultOnOff> UnknownLocations(
"use-unknown-locations", cl::Hidden,
cl::desc("Make an absence of debug location information explicit."),
cl::values(clEnumVal(Default, "At top of block or after label"),
clEnumVal(Enable, "In all cases"), clEnumVal(Disable, "Never")),
cl::init(Default));
static cl::opt<AccelTableKind> AccelTables(
"accel-tables", cl::Hidden, cl::desc("Output dwarf accelerator tables."),
cl::values(clEnumValN(AccelTableKind::Default, "Default",
"Default for platform"),
clEnumValN(AccelTableKind::None, "Disable", "Disabled."),
clEnumValN(AccelTableKind::Apple, "Apple", "Apple"),
clEnumValN(AccelTableKind::Dwarf, "Dwarf", "DWARF")),
cl::init(AccelTableKind::Default));
static cl::opt<DefaultOnOff>
DwarfInlinedStrings("dwarf-inlined-strings", cl::Hidden,
cl::desc("Use inlined strings rather than string section."),
cl::values(clEnumVal(Default, "Default for platform"),
clEnumVal(Enable, "Enabled"),
clEnumVal(Disable, "Disabled")),
cl::init(Default));
static cl::opt<bool>
NoDwarfPubSections("no-dwarf-pub-sections", cl::Hidden,
cl::desc("Disable emission of DWARF pub sections."),
cl::init(false));
static cl::opt<bool>
NoDwarfRangesSection("no-dwarf-ranges-section", cl::Hidden,
cl::desc("Disable emission .debug_ranges section."),
cl::init(false));
static cl::opt<DefaultOnOff> DwarfSectionsAsReferences(
"dwarf-sections-as-references", cl::Hidden,
cl::desc("Use sections+offset as references rather than labels."),
cl::values(clEnumVal(Default, "Default for platform"),
clEnumVal(Enable, "Enabled"), clEnumVal(Disable, "Disabled")),
cl::init(Default));
enum LinkageNameOption {
DefaultLinkageNames,
AllLinkageNames,
AbstractLinkageNames
};
static cl::opt<LinkageNameOption>
DwarfLinkageNames("dwarf-linkage-names", cl::Hidden,
cl::desc("Which DWARF linkage-name attributes to emit."),
cl::values(clEnumValN(DefaultLinkageNames, "Default",
"Default for platform"),
clEnumValN(AllLinkageNames, "All", "All"),
clEnumValN(AbstractLinkageNames, "Abstract",
"Abstract subprograms")),
cl::init(DefaultLinkageNames));
static const char *const DWARFGroupName = "dwarf";
static const char *const DWARFGroupDescription = "DWARF Emission";
static const char *const DbgTimerName = "writer";
static const char *const DbgTimerDescription = "DWARF Debug Writer";
void DebugLocDwarfExpression::emitOp(uint8_t Op, const char *Comment) {
BS.EmitInt8(
Op, Comment ? Twine(Comment) + " " + dwarf::OperationEncodingString(Op)
: dwarf::OperationEncodingString(Op));
}
void DebugLocDwarfExpression::emitSigned(int64_t Value) {
BS.EmitSLEB128(Value, Twine(Value));
}
void DebugLocDwarfExpression::emitUnsigned(uint64_t Value) {
BS.EmitULEB128(Value, Twine(Value));
}
bool DebugLocDwarfExpression::isFrameRegister(const TargetRegisterInfo &TRI,
unsigned MachineReg) {
// This information is not available while emitting .debug_loc entries.
return false;
}
bool DbgVariable::isBlockByrefVariable() const {
assert(Var && "Invalid complex DbgVariable!");
return Var->getType().resolve()->isBlockByrefStruct();
}
const DIType *DbgVariable::getType() const {
DIType *Ty = Var->getType().resolve();
// FIXME: isBlockByrefVariable should be reformulated in terms of complex
// addresses instead.
if (Ty->isBlockByrefStruct()) {
/* Byref variables, in Blocks, are declared by the programmer as
"SomeType VarName;", but the compiler creates a
__Block_byref_x_VarName struct, and gives the variable VarName
either the struct, or a pointer to the struct, as its type. This
is necessary for various behind-the-scenes things the compiler
needs to do with by-reference variables in blocks.
However, as far as the original *programmer* is concerned, the
variable should still have type 'SomeType', as originally declared.
The following function dives into the __Block_byref_x_VarName
struct to find the original type of the variable. This will be
passed back to the code generating the type for the Debug
Information Entry for the variable 'VarName'. 'VarName' will then
have the original type 'SomeType' in its debug information.
The original type 'SomeType' will be the type of the field named
'VarName' inside the __Block_byref_x_VarName struct.
NOTE: In order for this to not completely fail on the debugger
side, the Debug Information Entry for the variable VarName needs to
have a DW_AT_location that tells the debugger how to unwind through
the pointers and __Block_byref_x_VarName struct to find the actual
value of the variable. The function addBlockByrefType does this. */
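/* A minimal sketch of such a struct (hypothetical layout):
     struct __Block_byref_x_VarName {
       void *__isa;
       struct __Block_byref_x_VarName *__forwarding;
       int __flags, __size;
       SomeType VarName; // the field whose declared type we recover below
     }; */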
DIType *subType = Ty;
uint16_t tag = Ty->getTag();
if (tag == dwarf::DW_TAG_pointer_type)
subType = resolve(cast<DIDerivedType>(Ty)->getBaseType());
auto Elements = cast<DICompositeType>(subType)->getElements();
for (unsigned i = 0, N = Elements.size(); i < N; ++i) {
auto *DT = cast<DIDerivedType>(Elements[i]);
if (getName() == DT->getName())
return resolve(DT->getBaseType());
}
}
return Ty;
}
ArrayRef<DbgVariable::FrameIndexExpr> DbgVariable::getFrameIndexExprs() const {
if (FrameIndexExprs.size() == 1)
return FrameIndexExprs;
assert(llvm::all_of(FrameIndexExprs,
[](const FrameIndexExpr &A) {
return A.Expr->isFragment();
}) &&
"multiple FI expressions without DW_OP_LLVM_fragment");
llvm::sort(FrameIndexExprs.begin(), FrameIndexExprs.end(),
[](const FrameIndexExpr &A, const FrameIndexExpr &B) -> bool {
return A.Expr->getFragmentInfo()->OffsetInBits <
B.Expr->getFragmentInfo()->OffsetInBits;
});
return FrameIndexExprs;
}
void DbgVariable::addMMIEntry(const DbgVariable &V) {
assert(DebugLocListIndex == ~0U && !MInsn && "not an MMI entry");
assert(V.DebugLocListIndex == ~0U && !V.MInsn && "not an MMI entry");
assert(V.Var == Var && "conflicting variable");
assert(V.IA == IA && "conflicting inlined-at location");
assert(!FrameIndexExprs.empty() && "Expected an MMI entry");
assert(!V.FrameIndexExprs.empty() && "Expected an MMI entry");
// FIXME: This logic should not be necessary anymore, as we now have proper
// deduplication. However, without it, we currently run into the assertion
// below, which means that we are likely dealing with broken input, i.e. two
// non-fragment entries for the same variable at different frame indices.
if (FrameIndexExprs.size()) {
auto *Expr = FrameIndexExprs.back().Expr;
if (!Expr || !Expr->isFragment())
return;
}
for (const auto &FIE : V.FrameIndexExprs)
// Ignore duplicate entries.
if (llvm::none_of(FrameIndexExprs, [&](const FrameIndexExpr &Other) {
return FIE.FI == Other.FI && FIE.Expr == Other.Expr;
}))
FrameIndexExprs.push_back(FIE);
assert((FrameIndexExprs.size() == 1 ||
llvm::all_of(FrameIndexExprs,
[](FrameIndexExpr &FIE) {
return FIE.Expr && FIE.Expr->isFragment();
})) &&
"conflicting locations for variable");
}
static AccelTableKind computeAccelTableKind(unsigned DwarfVersion,
bool GenerateTypeUnits,
DebuggerKind Tuning,
const Triple &TT) {
// Honor an explicit request.
if (AccelTables != AccelTableKind::Default)
return AccelTables;
// Accelerator tables with type units are currently not supported.
if (GenerateTypeUnits)
return AccelTableKind::None;
// Accelerator tables get emitted if targeting DWARF v5 or LLDB. DWARF v5
// always implies debug_names. For lower standard versions we use Apple
// accelerator tables on Apple platforms and debug_names elsewhere.
if (DwarfVersion >= 5)
return AccelTableKind::Dwarf;
if (Tuning == DebuggerKind::LLDB)
return TT.isOSBinFormatMachO() ? AccelTableKind::Apple
: AccelTableKind::Dwarf;
return AccelTableKind::None;
}
DwarfDebug::DwarfDebug(AsmPrinter *A, Module *M)
: DebugHandlerBase(A), DebugLocs(A->OutStreamer->isVerboseAsm()),
InfoHolder(A, "info_string", DIEValueAllocator),
SkeletonHolder(A, "skel_string", DIEValueAllocator),
IsDarwin(A->TM.getTargetTriple().isOSDarwin()) {
const Triple &TT = Asm->TM.getTargetTriple();
// Make sure we know our "debugger tuning." The target option takes
// precedence; fall back to triple-based defaults.
if (Asm->TM.Options.DebuggerTuning != DebuggerKind::Default)
DebuggerTuning = Asm->TM.Options.DebuggerTuning;
else if (IsDarwin)
DebuggerTuning = DebuggerKind::LLDB;
else if (TT.isPS4CPU())
DebuggerTuning = DebuggerKind::SCE;
else
DebuggerTuning = DebuggerKind::GDB;
if (DwarfInlinedStrings == Default)
UseInlineStrings = TT.isNVPTX();
else
UseInlineStrings = DwarfInlinedStrings == Enable;
UseLocSection = !TT.isNVPTX();
HasAppleExtensionAttributes = tuneForLLDB();
// Handle split DWARF.
HasSplitDwarf = !Asm->TM.Options.MCOptions.SplitDwarfFile.empty();
// SCE defaults to linkage names only for abstract subprograms.
if (DwarfLinkageNames == DefaultLinkageNames)
UseAllLinkageNames = !tuneForSCE();
else
UseAllLinkageNames = DwarfLinkageNames == AllLinkageNames;
unsigned DwarfVersionNumber = Asm->TM.Options.MCOptions.DwarfVersion;
unsigned DwarfVersion = DwarfVersionNumber ? DwarfVersionNumber
: MMI->getModule()->getDwarfVersion();
// Use dwarf 4 by default if nothing is requested. For NVPTX, use dwarf 2.
DwarfVersion =
TT.isNVPTX() ? 2 : (DwarfVersion ? DwarfVersion : dwarf::DWARF_VERSION);
UsePubSections = !NoDwarfPubSections && !TT.isNVPTX();
UseRangesSection = !NoDwarfRangesSection && !TT.isNVPTX();
// Use sections as references. Force for NVPTX.
if (DwarfSectionsAsReferences == Default)
UseSectionsAsReferences = TT.isNVPTX();
else
UseSectionsAsReferences = DwarfSectionsAsReferences == Enable;
// Don't generate type units for unsupported object file formats.
GenerateTypeUnits =
A->TM.getTargetTriple().isOSBinFormatELF() && GenerateDwarfTypeUnits;
TheAccelTableKind = computeAccelTableKind(
DwarfVersion, GenerateTypeUnits, DebuggerTuning, A->TM.getTargetTriple());
// Work around a GDB bug. GDB doesn't support the standard opcode;
// SCE doesn't support GNU's; LLDB prefers the standard opcode, which
// is defined as of DWARF 3.
// See GDB bug 11616 - DW_OP_form_tls_address is unimplemented
// https://sourceware.org/bugzilla/show_bug.cgi?id=11616
UseGNUTLSOpcode = tuneForGDB() || DwarfVersion < 3;
// GDB does not fully support the DWARF 4 representation for bitfields.
UseDWARF2Bitfields = (DwarfVersion < 4) || tuneForGDB();
// The DWARF v5 string offsets table has (possibly shared) contributions
// from each compile and type unit, each preceded by a header. The string
// offsets table used by the pre-DWARF v5 split-DWARF implementation uses
// a monolithic string offsets table without any header.
UseSegmentedStringOffsetsTable = DwarfVersion >= 5;
Asm->OutStreamer->getContext().setDwarfVersion(DwarfVersion);
}
// Define out of line so we don't have to include DwarfUnit.h in DwarfDebug.h.
DwarfDebug::~DwarfDebug() = default;
static bool isObjCClass(StringRef Name) {
return Name.startswith("+") || Name.startswith("-");
}
static bool hasObjCCategory(StringRef Name) {
if (!isObjCClass(Name))
return false;
return Name.find(") ") != StringRef::npos;
}
static void getObjCClassCategory(StringRef In, StringRef &Class,
StringRef &Category) {
if (!hasObjCCategory(In)) {
Class = In.slice(In.find('[') + 1, In.find(' '));
Category = "";
return;
}
Class = In.slice(In.find('[') + 1, In.find('('));
Category = In.slice(In.find('[') + 1, In.find(' '));
}
static StringRef getObjCMethodName(StringRef In) {
return In.slice(In.find(' ') + 1, In.find(']'));
}
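// For illustration: given In = "-[Foo(Bar) baz:]", the helpers above yield
// Class = "Foo", Category = "Foo(Bar)", and method name "baz:".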
// Add the various names to the DWARF accelerator tables.
void DwarfDebug::addSubprogramNames(const DISubprogram *SP, DIE &Die) {
if (!SP->isDefinition())
return;
if (SP->getName() != "")
addAccelName(SP->getName(), Die);
// If the linkage name is different from the name, go ahead and output it as
// well into the name table. Only do that if we are going to actually emit
// that name.
if (SP->getLinkageName() != "" && SP->getName() != SP->getLinkageName() &&
(useAllLinkageNames() || InfoHolder.getAbstractSPDies().lookup(SP)))
addAccelName(SP->getLinkageName(), Die);
// If this is an Objective-C selector name add it to the ObjC accelerator
// too.
if (isObjCClass(SP->getName())) {
StringRef Class, Category;
getObjCClassCategory(SP->getName(), Class, Category);
addAccelObjC(Class, Die);
if (Category != "")
addAccelObjC(Category, Die);
// Also add the base method name to the name table.
addAccelName(getObjCMethodName(SP->getName()), Die);
}
}
/// Check whether we should create a DIE for the given Scope; return true
/// if we don't create a DIE (the corresponding DIE is null).
bool DwarfDebug::isLexicalScopeDIENull(LexicalScope *Scope) {
if (Scope->isAbstractScope())
return false;
// We don't create a DIE if there is no Range.
const SmallVectorImpl<InsnRange> &Ranges = Scope->getRanges();
if (Ranges.empty())
return true;
if (Ranges.size() > 1)
return false;
// We don't create a DIE if we have a single Range and the end label
// is null.
return !getLabelAfterInsn(Ranges.front().second);
}
template <typename Func> static void forBothCUs(DwarfCompileUnit &CU, Func F) {
F(CU);
if (auto *SkelCU = CU.getSkeleton())
if (CU.getCUNode()->getSplitDebugInlining())
F(*SkelCU);
}
bool DwarfDebug::shareAcrossDWOCUs() const {
return SplitDwarfCrossCuReferences;
}
void DwarfDebug::constructAbstractSubprogramScopeDIE(DwarfCompileUnit &SrcCU,
LexicalScope *Scope) {
assert(Scope && Scope->getScopeNode());
assert(Scope->isAbstractScope());
assert(!Scope->getInlinedAt());
auto *SP = cast<DISubprogram>(Scope->getScopeNode());
// Find the subprogram's DwarfCompileUnit in the SPMap in case the subprogram
// was inlined from another compile unit.
if (useSplitDwarf() && !shareAcrossDWOCUs() && !SP->getUnit()->getSplitDebugInlining())
// Avoid building the original CU if it won't be used
SrcCU.constructAbstractSubprogramScopeDIE(Scope);
else {
auto &CU = getOrCreateDwarfCompileUnit(SP->getUnit());
if (auto *SkelCU = CU.getSkeleton()) {
(shareAcrossDWOCUs() ? CU : SrcCU)
.constructAbstractSubprogramScopeDIE(Scope);
if (CU.getCUNode()->getSplitDebugInlining())
SkelCU->constructAbstractSubprogramScopeDIE(Scope);
} else
CU.constructAbstractSubprogramScopeDIE(Scope);
}
}
void DwarfDebug::addGnuPubAttributes(DwarfCompileUnit &U, DIE &D) const {
if (!U.hasDwarfPubSections())
return;
U.addFlag(D, dwarf::DW_AT_GNU_pubnames);
}
// Create new DwarfCompileUnit for the given metadata node with tag
// DW_TAG_compile_unit.
DwarfCompileUnit &
DwarfDebug::getOrCreateDwarfCompileUnit(const DICompileUnit *DIUnit) {
if (auto *CU = CUMap.lookup(DIUnit))
return *CU;
StringRef FN = DIUnit->getFilename();
CompilationDir = DIUnit->getDirectory();
auto OwnedUnit = llvm::make_unique<DwarfCompileUnit>(
InfoHolder.getUnits().size(), DIUnit, Asm, this, &InfoHolder);
DwarfCompileUnit &NewCU = *OwnedUnit;
DIE &Die = NewCU.getUnitDie();
InfoHolder.addUnit(std::move(OwnedUnit));
if (useSplitDwarf()) {
NewCU.setSkeleton(constructSkeletonCU(NewCU));
NewCU.addString(Die, dwarf::DW_AT_GNU_dwo_name,
Asm->TM.Options.MCOptions.SplitDwarfFile);
}
for (auto *IE : DIUnit->getImportedEntities())
NewCU.addImportedEntity(IE);
// LTO with assembly output shares a single line table amongst multiple CUs.
// To avoid the compilation directory being ambiguous, let the line table
// explicitly describe the directory of all files, never relying on the
// compilation directory.
if (!Asm->OutStreamer->hasRawTextSupport() || SingleCU)
Asm->OutStreamer->emitDwarfFile0Directive(
CompilationDir, FN, NewCU.getMD5AsBytes(DIUnit->getFile()),
DIUnit->getSource(), NewCU.getUniqueID());
StringRef Producer = DIUnit->getProducer();
StringRef Flags = DIUnit->getFlags();
if (!Flags.empty()) {
std::string ProducerWithFlags = Producer.str() + " " + Flags.str();
NewCU.addString(Die, dwarf::DW_AT_producer, ProducerWithFlags);
} else
NewCU.addString(Die, dwarf::DW_AT_producer, Producer);
NewCU.addUInt(Die, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
DIUnit->getSourceLanguage());
NewCU.addString(Die, dwarf::DW_AT_name, FN);
// Add DW_str_offsets_base to the unit DIE, except for split units.
if (useSegmentedStringOffsetsTable() && !useSplitDwarf())
NewCU.addStringOffsetsStart();
if (!useSplitDwarf()) {
NewCU.initStmtList();
// If we're using split dwarf the compilation dir is going to be in the
// skeleton CU and so we don't need to duplicate it here.
if (!CompilationDir.empty())
NewCU.addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
addGnuPubAttributes(NewCU, Die);
}
if (useAppleExtensionAttributes()) {
if (DIUnit->isOptimized())
NewCU.addFlag(Die, dwarf::DW_AT_APPLE_optimized);
StringRef Flags = DIUnit->getFlags();
if (!Flags.empty())
NewCU.addString(Die, dwarf::DW_AT_APPLE_flags, Flags);
if (unsigned RVer = DIUnit->getRuntimeVersion())
NewCU.addUInt(Die, dwarf::DW_AT_APPLE_major_runtime_vers,
dwarf::DW_FORM_data1, RVer);
}
if (useSplitDwarf())
NewCU.setSection(Asm->getObjFileLowering().getDwarfInfoDWOSection());
else
NewCU.setSection(Asm->getObjFileLowering().getDwarfInfoSection());
if (DIUnit->getDWOId()) {
// This CU is either a clang module DWO or a skeleton CU.
NewCU.addUInt(Die, dwarf::DW_AT_GNU_dwo_id, dwarf::DW_FORM_data8,
DIUnit->getDWOId());
if (!DIUnit->getSplitDebugFilename().empty())
// This is a prefabricated skeleton CU.
NewCU.addString(Die, dwarf::DW_AT_GNU_dwo_name,
DIUnit->getSplitDebugFilename());
}
CUMap.insert({DIUnit, &NewCU});
CUDieMap.insert({&Die, &NewCU});
return NewCU;
}
void DwarfDebug::constructAndAddImportedEntityDIE(DwarfCompileUnit &TheCU,
const DIImportedEntity *N) {
if (isa<DILocalScope>(N->getScope()))
return;
if (DIE *D = TheCU.getOrCreateContextDIE(N->getScope()))
D->addChild(TheCU.constructImportedEntityDIE(N));
}
/// Sort and unique GVEs by comparing their fragment offset.
static SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &
sortGlobalExprs(SmallVectorImpl<DwarfCompileUnit::GlobalExpr> &GVEs) {
llvm::sort(GVEs.begin(), GVEs.end(),
[](DwarfCompileUnit::GlobalExpr A,
DwarfCompileUnit::GlobalExpr B) {
// Sort order: first null exprs, then exprs without fragment
// info, then sort by fragment offset in bits.
// FIXME: Come up with a more comprehensive comparator so
// the sorting isn't non-deterministic, and so the following
// std::unique call works correctly.
if (!A.Expr || !B.Expr)
return !!B.Expr;
auto FragmentA = A.Expr->getFragmentInfo();
auto FragmentB = B.Expr->getFragmentInfo();
if (!FragmentA || !FragmentB)
return !!FragmentB;
return FragmentA->OffsetInBits < FragmentB->OffsetInBits;
});
GVEs.erase(std::unique(GVEs.begin(), GVEs.end(),
[](DwarfCompileUnit::GlobalExpr A,
DwarfCompileUnit::GlobalExpr B) {
return A.Expr == B.Expr;
}),
GVEs.end());
return GVEs;
}
// Emit all Dwarf sections that should come prior to the content. Create
// global DIEs and emit initial debug info sections. This is invoked by
// the target AsmPrinter.
void DwarfDebug::beginModule() {
NamedRegionTimer T(DbgTimerName, DbgTimerDescription, DWARFGroupName,
DWARFGroupDescription, TimePassesIsEnabled);
if (DisableDebugInfoPrinting)
return;
const Module *M = MMI->getModule();
unsigned NumDebugCUs = std::distance(M->debug_compile_units_begin(),
M->debug_compile_units_end());
// Tell MMI whether we have debug info.
MMI->setDebugInfoAvailability(NumDebugCUs > 0);
SingleCU = NumDebugCUs == 1;
DenseMap<DIGlobalVariable *, SmallVector<DwarfCompileUnit::GlobalExpr, 1>>
GVMap;
for (const GlobalVariable &Global : M->globals()) {
SmallVector<DIGlobalVariableExpression *, 1> GVs;
Global.getDebugInfo(GVs);
for (auto *GVE : GVs)
GVMap[GVE->getVariable()].push_back({&Global, GVE->getExpression()});
}
// Create the symbol that designates the start of the unit's contribution
// to the string offsets table. In a split DWARF scenario, only the skeleton
// unit has the DW_AT_str_offsets_base attribute (and hence needs the symbol).
if (useSegmentedStringOffsetsTable())
(useSplitDwarf() ? SkeletonHolder : InfoHolder)
.setStringOffsetsStartSym(Asm->createTempSymbol("str_offsets_base"));
// Create the symbol that designates the start of the DWARF v5 range list
// table. It is located past the header and before the offsets table.
if (getDwarfVersion() >= 5)
(useSplitDwarf() ? SkeletonHolder : InfoHolder)
.setRnglistsTableBaseSym(Asm->createTempSymbol("rnglists_table_base"));
for (DICompileUnit *CUNode : M->debug_compile_units()) {
// FIXME: Move local imported entities into a list attached to the
// subprogram, then this search won't be needed and a
// getImportedEntities().empty() test should go below with the rest.
bool HasNonLocalImportedEntities = llvm::any_of(
CUNode->getImportedEntities(), [](const DIImportedEntity *IE) {
return !isa<DILocalScope>(IE->getScope());
});
if (!HasNonLocalImportedEntities && CUNode->getEnumTypes().empty() &&
CUNode->getRetainedTypes().empty() &&
CUNode->getGlobalVariables().empty() && CUNode->getMacros().empty())
continue;
DwarfCompileUnit &CU = getOrCreateDwarfCompileUnit(CUNode);
// Global Variables.
for (auto *GVE : CUNode->getGlobalVariables()) {
// Don't bother adding DIGlobalVariableExpressions listed in the CU if we
// already know about the variable and it isn't adding a constant
// expression.
auto &GVMapEntry = GVMap[GVE->getVariable()];
auto *Expr = GVE->getExpression();
if (!GVMapEntry.size() || (Expr && Expr->isConstant()))
GVMapEntry.push_back({nullptr, Expr});
}
DenseSet<DIGlobalVariable *> Processed;
for (auto *GVE : CUNode->getGlobalVariables()) {
DIGlobalVariable *GV = GVE->getVariable();
if (Processed.insert(GV).second)
CU.getOrCreateGlobalVariableDIE(GV, sortGlobalExprs(GVMap[GV]));
}
for (auto *Ty : CUNode->getEnumTypes()) {
// The enum types array by design contains pointers to
// MDNodes rather than DIRefs. Unique them here.
CU.getOrCreateTypeDIE(cast<DIType>(Ty));
}
for (auto *Ty : CUNode->getRetainedTypes()) {
// The retained types array by design contains pointers to
// MDNodes rather than DIRefs. Unique them here.
if (DIType *RT = dyn_cast<DIType>(Ty))
// There is no point in force-emitting a forward declaration.
CU.getOrCreateTypeDIE(RT);
}
// Emit imported_modules last so that the relevant context is already
// available.
for (auto *IE : CUNode->getImportedEntities())
constructAndAddImportedEntityDIE(CU, IE);
}
}
void DwarfDebug::finishVariableDefinitions() {
for (const auto &Var : ConcreteVariables) {
DIE *VariableDie = Var->getDIE();
assert(VariableDie);
// FIXME: Consider the time-space tradeoff of just storing the unit pointer
// in the ConcreteVariables list, rather than looking it up again here.
// DIE::getUnit isn't simple - it walks parent pointers, etc.
DwarfCompileUnit *Unit = CUDieMap.lookup(VariableDie->getUnitDie());
assert(Unit);
Unit->finishVariableDefinition(*Var);
}
}
void DwarfDebug::finishSubprogramDefinitions() {
for (const DISubprogram *SP : ProcessedSPNodes) {
assert(SP->getUnit()->getEmissionKind() != DICompileUnit::NoDebug);
forBothCUs(
getOrCreateDwarfCompileUnit(SP->getUnit()),
[&](DwarfCompileUnit &CU) { CU.finishSubprogramDefinition(SP); });
}
}
void DwarfDebug::finalizeModuleInfo() {
const TargetLoweringObjectFile &TLOF = Asm->getObjFileLowering();
finishSubprogramDefinitions();
finishVariableDefinitions();
// Include the DWO file name in the hash if there's more than one CU.
// This handles ThinLTO's situation where imported CUs may very easily be
// duplicate with the same CU partially imported into another ThinLTO unit.
StringRef DWOName;
if (CUMap.size() > 1)
DWOName = Asm->TM.Options.MCOptions.SplitDwarfFile;
// Handle anything that needs to be done on a per-unit basis after
// all other generation.
for (const auto &P : CUMap) {
auto &TheCU = *P.second;
// Emit DW_AT_containing_type attribute to connect types with their
// vtable holding type.
TheCU.constructContainingTypeDIEs();
// Add CU specific attributes if we need to add any.
// If we're splitting the dwarf out now that we've got the entire
// CU then add the dwo id to it.
auto *SkCU = TheCU.getSkeleton();
if (useSplitDwarf()) {
// Emit a unique identifier for this CU.
uint64_t ID =
DIEHash(Asm).computeCUSignature(DWOName, TheCU.getUnitDie());
if (getDwarfVersion() >= 5) {
TheCU.setDWOId(ID);
SkCU->setDWOId(ID);
} else {
TheCU.addUInt(TheCU.getUnitDie(), dwarf::DW_AT_GNU_dwo_id,
dwarf::DW_FORM_data8, ID);
SkCU->addUInt(SkCU->getUnitDie(), dwarf::DW_AT_GNU_dwo_id,
dwarf::DW_FORM_data8, ID);
}
// We don't keep track of which addresses are used in which CU so this
// is a bit pessimistic under LTO.
if (!AddrPool.isEmpty()) {
const MCSymbol *Sym = TLOF.getDwarfAddrSection()->getBeginSymbol();
SkCU->addSectionLabel(SkCU->getUnitDie(), dwarf::DW_AT_GNU_addr_base,
Sym, Sym);
}
if (getDwarfVersion() < 5 && !SkCU->getRangeLists().empty()) {
const MCSymbol *Sym = TLOF.getDwarfRangesSection()->getBeginSymbol();
SkCU->addSectionLabel(SkCU->getUnitDie(), dwarf::DW_AT_GNU_ranges_base,
Sym, Sym);
}
}
// If we have code split among multiple sections or non-contiguous
// ranges of code then emit a DW_AT_ranges attribute on the unit that will
// remain in the .o file, otherwise add a DW_AT_low_pc.
// FIXME: We should use ranges to allow reordering of code, a la
// .subsections_via_symbols in Mach-O. This would mean turning on
// ranges for all subprogram DIEs for Mach-O.
DwarfCompileUnit &U = SkCU ? *SkCU : TheCU;
if (unsigned NumRanges = TheCU.getRanges().size()) {
if (NumRanges > 1 && useRangesSection())
// A DW_AT_low_pc attribute may also be specified in combination with
// DW_AT_ranges to specify the default base address for use in
// location lists (see Section 2.6.2) and range lists (see Section
// 2.17.3).
U.addUInt(U.getUnitDie(), dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr, 0);
else
U.setBaseAddress(TheCU.getRanges().front().getStart());
U.attachRangesOrLowHighPC(U.getUnitDie(), TheCU.takeRanges());
}
if (getDwarfVersion() >= 5 && !useSplitDwarf() &&
!U.getRangeLists().empty())
U.addRnglistsBase();
auto *CUNode = cast<DICompileUnit>(P.first);
// If compile Unit has macros, emit "DW_AT_macro_info" attribute.
if (CUNode->getMacros())
U.addSectionLabel(U.getUnitDie(), dwarf::DW_AT_macro_info,
U.getMacroLabelBegin(),
TLOF.getDwarfMacinfoSection()->getBeginSymbol());
}
// Emit all frontend-produced Skeleton CUs, i.e., Clang modules.
for (auto *CUNode : MMI->getModule()->debug_compile_units())
if (CUNode->getDWOId())
getOrCreateDwarfCompileUnit(CUNode);
// Compute DIE offsets and sizes.
InfoHolder.computeSizeAndOffsets();
if (useSplitDwarf())
SkeletonHolder.computeSizeAndOffsets();
}
// Emit all Dwarf sections that should come after the content.
void DwarfDebug::endModule() {
assert(CurFn == nullptr);
assert(CurMI == nullptr);
// If we aren't actually generating debug info (checked in beginModule,
// conditionalized on !DisableDebugInfoPrinting and the presence of the
// llvm.dbg.cu metadata node), there is nothing to emit.
if (!MMI->hasDebugInfo())
return;
// Finalize the debug info for the module.
finalizeModuleInfo();
emitDebugStr();
if (useSplitDwarf())
emitDebugLocDWO();
else
// Emit info into a debug loc section.
emitDebugLoc();
// Emit the corresponding abbreviations into an abbrev section.
emitAbbreviations();
// Emit all the DIEs into a debug info section.
emitDebugInfo();
// Emit info into a debug aranges section.
if (GenerateARangeSection)
emitDebugARanges();
// Emit info into a debug ranges section.
emitDebugRanges();
// Emit info into a debug macinfo section.
emitDebugMacinfo();
if (useSplitDwarf()) {
emitDebugStrDWO();
emitDebugInfoDWO();
emitDebugAbbrevDWO();
emitDebugLineDWO();
emitDebugAddr();
}
// Emit info into the dwarf accelerator table sections.
switch (getAccelTableKind()) {
case AccelTableKind::Apple:
emitAccelNames();
emitAccelObjC();
emitAccelNamespaces();
emitAccelTypes();
break;
case AccelTableKind::Dwarf:
emitAccelDebugNames();
break;
case AccelTableKind::None:
break;
case AccelTableKind::Default:
llvm_unreachable("Default should have already been resolved.");
}
// Emit the pubnames and pubtypes sections if requested.
emitDebugPubSections();
// clean up.
// FIXME: AbstractVariables.clear();
}
void DwarfDebug::ensureAbstractVariableIsCreated(DwarfCompileUnit &CU, InlinedVariable IV,
const MDNode *ScopeNode) {
const DILocalVariable *Cleansed = nullptr;
if (CU.getExistingAbstractVariable(IV, Cleansed))
return;
CU.createAbstractVariable(Cleansed, LScopes.getOrCreateAbstractScope(
cast<DILocalScope>(ScopeNode)));
}
void DwarfDebug::ensureAbstractVariableIsCreatedIfScoped(DwarfCompileUnit &CU,
InlinedVariable IV, const MDNode *ScopeNode) {
const DILocalVariable *Cleansed = nullptr;
if (CU.getExistingAbstractVariable(IV, Cleansed))
return;
if (LexicalScope *Scope =
LScopes.findAbstractScope(cast_or_null<DILocalScope>(ScopeNode)))
CU.createAbstractVariable(Cleansed, Scope);
}
// Collect variable information from side table maintained by MF.
void DwarfDebug::collectVariableInfoFromMFTable(
DwarfCompileUnit &TheCU, DenseSet<InlinedVariable> &Processed) {
SmallDenseMap<InlinedVariable, DbgVariable *> MFVars;
for (const auto &VI : Asm->MF->getVariableDbgInfo()) {
if (!VI.Var)
continue;
assert(VI.Var->isValidLocationForIntrinsic(VI.Loc) &&
"Expected inlined-at fields to agree");
InlinedVariable Var(VI.Var, VI.Loc->getInlinedAt());
Processed.insert(Var);
LexicalScope *Scope = LScopes.findLexicalScope(VI.Loc);
// If the variable's scope is not found, skip this variable.
if (!Scope)
continue;
ensureAbstractVariableIsCreatedIfScoped(TheCU, Var, Scope->getScopeNode());
auto RegVar = llvm::make_unique<DbgVariable>(Var.first, Var.second);
RegVar->initializeMMI(VI.Expr, VI.Slot);
if (DbgVariable *DbgVar = MFVars.lookup(Var))
DbgVar->addMMIEntry(*RegVar);
else if (InfoHolder.addScopeVariable(Scope, RegVar.get())) {
MFVars.insert({Var, RegVar.get()});
ConcreteVariables.push_back(std::move(RegVar));
}
}
}
// Get .debug_loc entry for the instruction range starting at MI.
static DebugLocEntry::Value getDebugLocValue(const MachineInstr *MI) {
const DIExpression *Expr = MI->getDebugExpression();
assert(MI->getNumOperands() == 4);
if (MI->getOperand(0).isReg()) {
auto RegOp = MI->getOperand(0);
auto Op1 = MI->getOperand(1);
// If the second operand is an immediate, this is a
// register-indirect address.
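// For illustration (hypothetical MIR): "DBG_VALUE $rbp, 0, !123,
// !DIExpression()" is a register-indirect (memory) location, while
// "DBG_VALUE $rax, $noreg, !123, !DIExpression()" describes the value
// live in $rax.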
assert((!Op1.isImm() || (Op1.getImm() == 0)) && "unexpected offset");
MachineLocation MLoc(RegOp.getReg(), Op1.isImm());
return DebugLocEntry::Value(Expr, MLoc);
}
if (MI->getOperand(0).isImm())
return DebugLocEntry::Value(Expr, MI->getOperand(0).getImm());
if (MI->getOperand(0).isFPImm())
return DebugLocEntry::Value(Expr, MI->getOperand(0).getFPImm());
if (MI->getOperand(0).isCImm())
return DebugLocEntry::Value(Expr, MI->getOperand(0).getCImm());
llvm_unreachable("Unexpected 4-operand DBG_VALUE instruction!");
}
/// If this and Next are describing different fragments of the same
/// variable, merge them by appending Next's values to the current
/// list of values.
/// Return true if the merge was successful.
bool DebugLocEntry::MergeValues(const DebugLocEntry &Next) {
if (Begin == Next.Begin) {
auto *FirstExpr = cast<DIExpression>(Values[0].Expression);
auto *FirstNextExpr = cast<DIExpression>(Next.Values[0].Expression);
if (!FirstExpr->isFragment() || !FirstNextExpr->isFragment())
return false;
// We can only merge entries if none of the fragments overlap any others.
// In doing so, we can take advantage of the fact that both lists are
// sorted.
for (unsigned i = 0, j = 0; i < Values.size(); ++i) {
for (; j < Next.Values.size(); ++j) {
int res = cast<DIExpression>(Values[i].Expression)->fragmentCmp(
cast<DIExpression>(Next.Values[j].Expression));
if (res == 0) // The two expressions overlap, we can't merge.
return false;
// Values[i] is entirely before Next.Values[j],
// so go back to the next entry of Values.
else if (res == -1)
break;
// Next.Values[j] is entirely before Values[i], so go on to the
// next entry of Next.Values.
}
}
addValues(Next.Values);
End = Next.End;
return true;
}
return false;
}
/// Build the location list for all DBG_VALUEs in the function that
/// describe the same variable. If the ranges of several independent
/// fragments of the same variable overlap partially, split them up and
/// combine the ranges. The resulting DebugLocEntries will have strictly
/// monotonically increasing begin addresses and will never overlap.
//
// Input:
//
// Ranges History [var, loc, fragment ofs size]
// 0 | [x, (reg0, fragment 0, 32)]
// 1 | | [x, (reg1, fragment 32, 32)] <- IsFragmentOfPrevEntry
// 2 | | ...
// 3 | [clobber reg0]
// 4 [x, (mem, fragment 0, 64)] <- overlapping with both previous fragments of
// x.
//
// Output:
//
// [0-1] [x, (reg0, fragment 0, 32)]
// [1-3] [x, (reg0, fragment 0, 32), (reg1, fragment 32, 32)]
// [3-4] [x, (reg1, fragment 32, 32)]
// [4- ] [x, (mem, fragment 0, 64)]
void
DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,
const DbgValueHistoryMap::InstrRanges &Ranges) {
SmallVector<DebugLocEntry::Value, 4> OpenRanges;
for (auto I = Ranges.begin(), E = Ranges.end(); I != E; ++I) {
const MachineInstr *Begin = I->first;
const MachineInstr *End = I->second;
assert(Begin->isDebugValue() && "Invalid History entry");
// Check if a variable is inaccessible in this range.
if (Begin->getNumOperands() > 1 &&
Begin->getOperand(0).isReg() && !Begin->getOperand(0).getReg()) {
OpenRanges.clear();
continue;
}
// If this fragment overlaps with any open ranges, truncate them.
const DIExpression *DIExpr = Begin->getDebugExpression();
auto Last = remove_if(OpenRanges, [&](DebugLocEntry::Value R) {
return DIExpr->fragmentsOverlap(R.getExpression());
});
OpenRanges.erase(Last, OpenRanges.end());
const MCSymbol *StartLabel = getLabelBeforeInsn(Begin);
assert(StartLabel && "Forgot label before DBG_VALUE starting a range!");
const MCSymbol *EndLabel;
if (End != nullptr)
EndLabel = getLabelAfterInsn(End);
else if (std::next(I) == Ranges.end())
EndLabel = Asm->getFunctionEnd();
else
EndLabel = getLabelBeforeInsn(std::next(I)->first);
assert(EndLabel && "Forgot label after instruction ending a range!");
LLVM_DEBUG(dbgs() << "DotDebugLoc: " << *Begin << "\n");
auto Value = getDebugLocValue(Begin);
DebugLocEntry Loc(StartLabel, EndLabel, Value);
bool couldMerge = false;
// If this is a fragment, it may belong to the current DebugLocEntry.
if (DIExpr->isFragment()) {
// Add this value to the list of open ranges.
OpenRanges.push_back(Value);
// Attempt to add the fragment to the last entry.
if (!DebugLoc.empty())
if (DebugLoc.back().MergeValues(Loc))
couldMerge = true;
}
if (!couldMerge) {
// Need to add a new DebugLocEntry. Add all values from still
// valid non-overlapping fragments.
if (OpenRanges.size())
Loc.addValues(OpenRanges);
DebugLoc.push_back(std::move(Loc));
}
// Attempt to coalesce the ranges of two otherwise identical
// DebugLocEntries.
auto CurEntry = DebugLoc.rbegin();
LLVM_DEBUG({
dbgs() << CurEntry->getValues().size() << " Values:\n";
for (auto &Value : CurEntry->getValues())
Value.dump();
dbgs() << "-----\n";
});
auto PrevEntry = std::next(CurEntry);
if (PrevEntry != DebugLoc.rend() && PrevEntry->MergeRanges(*CurEntry))
DebugLoc.pop_back();
}
}
DbgVariable *DwarfDebug::createConcreteVariable(DwarfCompileUnit &TheCU,
LexicalScope &Scope,
InlinedVariable IV) {
ensureAbstractVariableIsCreatedIfScoped(TheCU, IV, Scope.getScopeNode());
ConcreteVariables.push_back(
llvm::make_unique<DbgVariable>(IV.first, IV.second));
InfoHolder.addScopeVariable(&Scope, ConcreteVariables.back().get());
return ConcreteVariables.back().get();
}
/// Determine whether a *singular* DBG_VALUE is valid for the entirety of its
/// enclosing lexical scope. The check ensures there are no other instructions
/// in the same lexical scope preceding the DBG_VALUE and that its range is
/// either open or otherwise rolls off the end of the scope.
static bool validThroughout(LexicalScopes &LScopes,
const MachineInstr *DbgValue,
const MachineInstr *RangeEnd) {
assert(DbgValue->getDebugLoc() && "DBG_VALUE without a debug location");
auto MBB = DbgValue->getParent();
auto DL = DbgValue->getDebugLoc();
auto *LScope = LScopes.findLexicalScope(DL);
// Scope doesn't exist; this is a dead DBG_VALUE.
if (!LScope)
return false;
auto &LSRange = LScope->getRanges();
if (LSRange.size() == 0)
return false;
// Determine if the DBG_VALUE is valid at the beginning of its lexical block.
const MachineInstr *LScopeBegin = LSRange.front().first;
// Early exit if the lexical scope begins outside of the current block.
if (LScopeBegin->getParent() != MBB)
return false;
MachineBasicBlock::const_reverse_iterator Pred(DbgValue);
for (++Pred; Pred != MBB->rend(); ++Pred) {
if (Pred->getFlag(MachineInstr::FrameSetup))
break;
auto PredDL = Pred->getDebugLoc();
if (!PredDL || Pred->isMetaInstruction())
continue;
// Check whether the instruction preceding the DBG_VALUE is in the same
// (sub)scope as the DBG_VALUE.
if (DL->getScope() == PredDL->getScope())
return false;
auto *PredScope = LScopes.findLexicalScope(PredDL);
if (!PredScope || LScope->dominates(PredScope))
return false;
}
// If the range of the DBG_VALUE is open-ended, report success.
if (!RangeEnd)
return true;
// Fail if there are instructions belonging to our scope in another block.
const MachineInstr *LScopeEnd = LSRange.back().second;
if (LScopeEnd->getParent() != MBB)
return false;
// Single, constant DBG_VALUEs in the prologue are promoted to be live
// throughout the function. This is a hack, presumably for DWARF v2 and not
// necessarily correct. It would be much better to use a dbg.declare instead
// if we know the constant is live throughout the scope.
if (DbgValue->getOperand(0).isImm() && MBB->pred_empty())
return true;
return false;
}
// Find variables for each lexical scope.
void DwarfDebug::collectVariableInfo(DwarfCompileUnit &TheCU,
const DISubprogram *SP,
DenseSet<InlinedVariable> &Processed) {
// Grab the variable info that was squirreled away in the MMI side-table.
collectVariableInfoFromMFTable(TheCU, Processed);
for (const auto &I : DbgValues) {
InlinedVariable IV = I.first;
if (Processed.count(IV))
continue;
// Instruction ranges, specifying where IV is accessible.
const auto &Ranges = I.second;
if (Ranges.empty())
continue;
LexicalScope *Scope = nullptr;
if (const DILocation *IA = IV.second)
Scope = LScopes.findInlinedScope(IV.first->getScope(), IA);
else
Scope = LScopes.findLexicalScope(IV.first->getScope());
// If variable scope is not found then skip this variable.
if (!Scope)
continue;
Processed.insert(IV);
DbgVariable *RegVar = createConcreteVariable(TheCU, *Scope, IV);
const MachineInstr *MInsn = Ranges.front().first;
assert(MInsn->isDebugValue() && "History must begin with debug value");
// Check if there is a single DBG_VALUE, valid throughout the var's scope.
if (Ranges.size() == 1 &&
validThroughout(LScopes, MInsn, Ranges.front().second)) {
RegVar->initializeDbgValue(MInsn);
continue;
}
// Do not emit location lists if the .debug_loc section is disabled.
if (!useLocSection())
continue;
// Handle multiple DBG_VALUE instructions describing one variable.
DebugLocStream::ListBuilder List(DebugLocs, TheCU, *Asm, *RegVar, *MInsn);
// Build the location list for this variable.
SmallVector<DebugLocEntry, 8> Entries;
buildLocationList(Entries, Ranges);
// If the variable has a DIBasicType, extract it. Basic types cannot have
// unique identifiers, so don't bother resolving the type with the
// identifier map.
const DIBasicType *BT = dyn_cast<DIBasicType>(
static_cast<const Metadata *>(IV.first->getType()));
// Finalize the entry by lowering it into a DWARF bytestream.
for (auto &Entry : Entries)
Entry.finalize(*Asm, List, BT);
}
// Collect info for variables that were optimized out.
for (const DINode *DN : SP->getRetainedNodes()) {
if (auto *DV = dyn_cast<DILocalVariable>(DN)) {
if (Processed.insert(InlinedVariable(DV, nullptr)).second)
if (LexicalScope *Scope = LScopes.findLexicalScope(DV->getScope()))
createConcreteVariable(TheCU, *Scope, InlinedVariable(DV, nullptr));
}
}
}
// Process beginning of an instruction.
void DwarfDebug::beginInstruction(const MachineInstr *MI) {
DebugHandlerBase::beginInstruction(MI);
assert(CurMI);
const auto *SP = MI->getMF()->getFunction().getSubprogram();
if (!SP || SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
return;
// Check if source location changes, but ignore DBG_VALUE and CFI locations.
// If the instruction is part of the function frame setup code, do not emit
// any line record, as there is no correspondence with any user code.
if (MI->isMetaInstruction() || MI->getFlag(MachineInstr::FrameSetup))
return;
const DebugLoc &DL = MI->getDebugLoc();
// When we emit a line-0 record, we don't update PrevInstLoc; so look at
// the last line number actually emitted, to see if it was line 0.
unsigned LastAsmLine =
Asm->OutStreamer->getContext().getCurrentDwarfLoc().getLine();
if (DL == PrevInstLoc) {
// If we have an ongoing unspecified location, nothing to do here.
if (!DL)
return;
// We have an explicit location, same as the previous location.
// But we might be coming back to it after a line 0 record.
if (LastAsmLine == 0 && DL.getLine() != 0) {
// Reinstate the source location but not marked as a statement.
const MDNode *Scope = DL.getScope();
recordSourceLine(DL.getLine(), DL.getCol(), Scope, /*Flags=*/0);
}
return;
}
if (!DL) {
// We have an unspecified location, which might want to be line 0.
// If we have already emitted a line-0 record, don't repeat it.
if (LastAsmLine == 0)
return;
// If user said Don't Do That, don't do that.
if (UnknownLocations == Disable)
return;
// See if we have a reason to emit a line-0 record now.
// Reasons to emit a line-0 record include:
// - User asked for it (UnknownLocations).
// - Instruction has a label, so it's referenced from somewhere else,
// possibly debug information; we want it to have a source location.
// - Instruction is at the top of a block; we don't want to inherit the
// location from the physically previous (maybe unrelated) block.
if (UnknownLocations == Enable || PrevLabel ||
(PrevInstBB && PrevInstBB != MI->getParent())) {
// Preserve the file and column numbers, if we can, to save space in
// the encoded line table.
// Do not update PrevInstLoc, it remembers the last non-0 line.
const MDNode *Scope = nullptr;
unsigned Column = 0;
if (PrevInstLoc) {
Scope = PrevInstLoc.getScope();
Column = PrevInstLoc.getCol();
}
recordSourceLine(/*Line=*/0, Column, Scope, /*Flags=*/0);
}
return;
}
// We have an explicit location, different from the previous location.
// Don't repeat a line-0 record, but otherwise emit the new location.
// (The new location might be an explicit line 0, which we do emit.)
if (PrevInstLoc && DL.getLine() == 0 && LastAsmLine == 0)
return;
unsigned Flags = 0;
if (DL == PrologEndLoc) {
Flags |= DWARF2_FLAG_PROLOGUE_END | DWARF2_FLAG_IS_STMT;
PrologEndLoc = DebugLoc();
}
// If the line changed, we call that a new statement; unless we went to
// line 0 and came back, in which case it is not a new statement.
unsigned OldLine = PrevInstLoc ? PrevInstLoc.getLine() : LastAsmLine;
if (DL.getLine() && DL.getLine() != OldLine)
Flags |= DWARF2_FLAG_IS_STMT;
const MDNode *Scope = DL.getScope();
recordSourceLine(DL.getLine(), DL.getCol(), Scope, Flags);
// If we're not at line 0, remember this location.
if (DL.getLine())
PrevInstLoc = DL;
}
static DebugLoc findPrologueEndLoc(const MachineFunction *MF) {
// First known non-DBG_VALUE and non-frame setup location marks
// the beginning of the function body.
for (const auto &MBB : *MF)
for (const auto &MI : MBB)
if (!MI.isMetaInstruction() && !MI.getFlag(MachineInstr::FrameSetup) &&
MI.getDebugLoc())
return MI.getDebugLoc();
return DebugLoc();
}
// Gather pre-function debug information. Assumes being called immediately
// after the function entry point has been emitted.
void DwarfDebug::beginFunctionImpl(const MachineFunction *MF) {
CurFn = MF;
auto *SP = MF->getFunction().getSubprogram();
assert(LScopes.empty() || SP == LScopes.getCurrentFunctionScope()->getScopeNode());
if (SP->getUnit()->getEmissionKind() == DICompileUnit::NoDebug)
return;
DwarfCompileUnit &CU = getOrCreateDwarfCompileUnit(SP->getUnit());
// Set DwarfCompileUnitID in MCContext to the Compile Unit this function
// belongs to so that we add to the correct per-cu line table in the
// non-asm case.
if (Asm->OutStreamer->hasRawTextSupport())
// Use a single line table if we are generating assembly.
Asm->OutStreamer->getContext().setDwarfCompileUnitID(0);
else
Asm->OutStreamer->getContext().setDwarfCompileUnitID(CU.getUniqueID());
// Record beginning of function.
PrologEndLoc = findPrologueEndLoc(MF);
if (PrologEndLoc) {
// We'd like to list the prologue as "not statements" but GDB behaves
// poorly if we do that. Revisit this with caution/GDB (7.5+) testing.
auto *SP = PrologEndLoc->getInlinedAtScope()->getSubprogram();
recordSourceLine(SP->getScopeLine(), 0, SP, DWARF2_FLAG_IS_STMT);
}
}
void DwarfDebug::skippedNonDebugFunction() {
// If we don't have a subprogram for this function then there will be a hole
// in the range information. Keep note of this by setting the previously used
// CU to nullptr.
PrevCU = nullptr;
CurFn = nullptr;
}
// Gather and emit post-function debug information.
void DwarfDebug::endFunctionImpl(const MachineFunction *MF) {
const DISubprogram *SP = MF->getFunction().getSubprogram();
assert(CurFn == MF &&
"endFunction should be called with the same function as beginFunction");
// Set DwarfCompileUnitID in MCContext to its default value.
Asm->OutStreamer->getContext().setDwarfCompileUnitID(0);
LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
assert(!FnScope || SP == FnScope->getScopeNode());
DwarfCompileUnit &TheCU = *CUMap.lookup(SP->getUnit());
DenseSet<InlinedVariable> ProcessedVars;
collectVariableInfo(TheCU, SP, ProcessedVars);
// Add the range of this function to the list of ranges for the CU.
TheCU.addRange(RangeSpan(Asm->getFunctionBegin(), Asm->getFunctionEnd()));
// Under -gmlt, skip building the subprogram if there are no inlined
// subroutines inside it. But with -fdebug-info-for-profiling, the subprogram
// is still needed as we need its source location.
if (!TheCU.getCUNode()->getDebugInfoForProfiling() &&
TheCU.getCUNode()->getEmissionKind() == DICompileUnit::LineTablesOnly &&
LScopes.getAbstractScopesList().empty() && !IsDarwin) {
assert(InfoHolder.getScopeVariables().empty());
PrevLabel = nullptr;
CurFn = nullptr;
return;
}
#ifndef NDEBUG
size_t NumAbstractScopes = LScopes.getAbstractScopesList().size();
#endif
// Construct abstract scopes.
for (LexicalScope *AScope : LScopes.getAbstractScopesList()) {
auto *SP = cast<DISubprogram>(AScope->getScopeNode());
for (const DINode *DN : SP->getRetainedNodes()) {
if (auto *DV = dyn_cast<DILocalVariable>(DN)) {
// Collect info for variables that were optimized out.
if (!ProcessedVars.insert(InlinedVariable(DV, nullptr)).second)
continue;
ensureAbstractVariableIsCreated(TheCU, InlinedVariable(DV, nullptr),
DV->getScope());
assert(LScopes.getAbstractScopesList().size() == NumAbstractScopes
&& "ensureAbstractVariableIsCreated inserted abstract scopes");
}
}
constructAbstractSubprogramScopeDIE(TheCU, AScope);
}
ProcessedSPNodes.insert(SP);
TheCU.constructSubprogramScopeDIE(SP, FnScope);
if (auto *SkelCU = TheCU.getSkeleton())
if (!LScopes.getAbstractScopesList().empty() &&
TheCU.getCUNode()->getSplitDebugInlining())
SkelCU->constructSubprogramScopeDIE(SP, FnScope);
// Clear debug info
// Ownership of DbgVariables is a bit subtle - ScopeVariables owns all the
// DbgVariables except those that are also in AbstractVariables (since they
// can be used cross-function)
InfoHolder.getScopeVariables().clear();
PrevLabel = nullptr;
CurFn = nullptr;
}
// Register a source line with debug info. Emits a DWARF .loc directive that
// provides correspondence to the source line list.
void DwarfDebug::recordSourceLine(unsigned Line, unsigned Col, const MDNode *S,
unsigned Flags) {
StringRef Fn;
unsigned FileNo = 1;
unsigned Discriminator = 0;
if (auto *Scope = cast_or_null<DIScope>(S)) {
Fn = Scope->getFilename();
if (Line != 0 && getDwarfVersion() >= 4)
if (auto *LBF = dyn_cast<DILexicalBlockFile>(Scope))
Discriminator = LBF->getDiscriminator();
unsigned CUID = Asm->OutStreamer->getContext().getDwarfCompileUnitID();
FileNo = static_cast<DwarfCompileUnit &>(*InfoHolder.getUnits()[CUID])
.getOrCreateSourceID(Scope->getFile());
}
Asm->OutStreamer->EmitDwarfLocDirective(FileNo, Line, Col, Flags, 0,
Discriminator, Fn);
}
//===----------------------------------------------------------------------===//
// Emit Methods
//===----------------------------------------------------------------------===//
// Emit the debug info section.
void DwarfDebug::emitDebugInfo() {
DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
Holder.emitUnits(/* UseOffsets */ false);
}
// Emit the abbreviation section.
void DwarfDebug::emitAbbreviations() {
DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
Holder.emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevSection());
}
void DwarfDebug::emitStringOffsetsTableHeader() {
DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
Holder.getStringPool().emitStringOffsetsTableHeader(
*Asm, Asm->getObjFileLowering().getDwarfStrOffSection(),
Holder.getStringOffsetsStartSym());
}
template <typename AccelTableT>
void DwarfDebug::emitAccel(AccelTableT &Accel, MCSection *Section,
StringRef TableName) {
Asm->OutStreamer->SwitchSection(Section);
// Emit the full data.
emitAppleAccelTable(Asm, Accel, TableName, Section->getBeginSymbol());
}
void DwarfDebug::emitAccelDebugNames() {
// Don't emit anything if we have no compilation units to index.
if (getUnits().empty())
return;
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfDebugNamesSection());
emitDWARF5AccelTable(Asm, AccelDebugNames, *this, getUnits());
}
// Emit visible names into a hashed accelerator table section.
void DwarfDebug::emitAccelNames() {
emitAccel(AccelNames, Asm->getObjFileLowering().getDwarfAccelNamesSection(),
"Names");
}
// Emit objective C classes and categories into a hashed accelerator table
// section.
void DwarfDebug::emitAccelObjC() {
emitAccel(AccelObjC, Asm->getObjFileLowering().getDwarfAccelObjCSection(),
"ObjC");
}
// Emit namespace dies into a hashed accelerator table.
void DwarfDebug::emitAccelNamespaces() {
emitAccel(AccelNamespace,
Asm->getObjFileLowering().getDwarfAccelNamespaceSection(),
"namespac");
}
// Emit type dies into a hashed accelerator table.
void DwarfDebug::emitAccelTypes() {
emitAccel(AccelTypes, Asm->getObjFileLowering().getDwarfAccelTypesSection(),
"types");
}
// Public name handling.
// The format for the various pubnames:
//
// dwarf pubnames - offset/name pairs where the offset is the offset into the CU
// for the DIE that is named.
//
// gnu pubnames - offset/index value/name tuples where the offset is the offset
// into the CU and the index value is computed according to the type of value
// for the DIE that is named.
//
// For type units the offset is the offset of the skeleton DIE. For split dwarf
// it's the offset within the debug_info/debug_types dwo section, however, the
// reference in the pubname header doesn't change.
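//
// For example, an external function's DIE would produce the gnu pubnames
// tuple (DIE offset, FUNCTION+EXTERNAL, name); this is an illustrative
// reading of computeIndexValue below, not actual emitted bytes.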
/// computeIndexValue - Compute the gdb index value for the DIE and CU.
static dwarf::PubIndexEntryDescriptor computeIndexValue(DwarfUnit *CU,
const DIE *Die) {
// Entities that ended up only in a Type Unit reference the CU instead (since
// the pub entry has offsets within the CU there's no real offset that can be
// provided anyway). As it happens all such entities (namespaces and types,
// types only in C++ at that) are rendered as TYPE+EXTERNAL. If this turns out
// not to be true it would be necessary to persist this information from the
// point at which the entry is added to the index data structure - since by
// the time the index is built from that, the original type/namespace DIE in a
// type unit has already been destroyed so it can't be queried for properties
// like tag, etc.
if (Die->getTag() == dwarf::DW_TAG_compile_unit)
return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_TYPE,
dwarf::GIEL_EXTERNAL);
dwarf::GDBIndexEntryLinkage Linkage = dwarf::GIEL_STATIC;
// We could have a specification DIE that has most of our knowledge;
// look for that now.
if (DIEValue SpecVal = Die->findAttribute(dwarf::DW_AT_specification)) {
DIE &SpecDIE = SpecVal.getDIEEntry().getEntry();
if (SpecDIE.findAttribute(dwarf::DW_AT_external))
Linkage = dwarf::GIEL_EXTERNAL;
} else if (Die->findAttribute(dwarf::DW_AT_external))
Linkage = dwarf::GIEL_EXTERNAL;
switch (Die->getTag()) {
case dwarf::DW_TAG_class_type:
case dwarf::DW_TAG_structure_type:
case dwarf::DW_TAG_union_type:
case dwarf::DW_TAG_enumeration_type:
return dwarf::PubIndexEntryDescriptor(
dwarf::GIEK_TYPE, CU->getLanguage() != dwarf::DW_LANG_C_plus_plus
? dwarf::GIEL_STATIC
: dwarf::GIEL_EXTERNAL);
case dwarf::DW_TAG_typedef:
case dwarf::DW_TAG_base_type:
case dwarf::DW_TAG_subrange_type:
return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_TYPE, dwarf::GIEL_STATIC);
case dwarf::DW_TAG_namespace:
return dwarf::GIEK_TYPE;
case dwarf::DW_TAG_subprogram:
return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_FUNCTION, Linkage);
case dwarf::DW_TAG_variable:
return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_VARIABLE, Linkage);
case dwarf::DW_TAG_enumerator:
return dwarf::PubIndexEntryDescriptor(dwarf::GIEK_VARIABLE,
dwarf::GIEL_STATIC);
default:
return dwarf::GIEK_NONE;
}
}
/// emitDebugPubSections - Emit visible names and types into debug pubnames and
/// pubtypes sections.
void DwarfDebug::emitDebugPubSections() {
for (const auto &NU : CUMap) {
DwarfCompileUnit *TheU = NU.second;
if (!TheU->hasDwarfPubSections())
continue;
bool GnuStyle = TheU->getCUNode()->getGnuPubnames();
Asm->OutStreamer->SwitchSection(
GnuStyle ? Asm->getObjFileLowering().getDwarfGnuPubNamesSection()
: Asm->getObjFileLowering().getDwarfPubNamesSection());
emitDebugPubSection(GnuStyle, "Names", TheU, TheU->getGlobalNames());
Asm->OutStreamer->SwitchSection(
GnuStyle ? Asm->getObjFileLowering().getDwarfGnuPubTypesSection()
: Asm->getObjFileLowering().getDwarfPubTypesSection());
emitDebugPubSection(GnuStyle, "Types", TheU, TheU->getGlobalTypes());
}
}
void DwarfDebug::emitSectionReference(const DwarfCompileUnit &CU) {
if (useSectionsAsReferences())
Asm->EmitDwarfOffset(CU.getSection()->getBeginSymbol(),
CU.getDebugSectionOffset());
else
Asm->emitDwarfSymbolReference(CU.getLabelBegin());
}
void DwarfDebug::emitDebugPubSection(bool GnuStyle, StringRef Name,
DwarfCompileUnit *TheU,
const StringMap<const DIE *> &Globals) {
if (auto *Skeleton = TheU->getSkeleton())
TheU = Skeleton;
// Emit the header.
Asm->OutStreamer->AddComment("Length of Public " + Name + " Info");
MCSymbol *BeginLabel = Asm->createTempSymbol("pub" + Name + "_begin");
MCSymbol *EndLabel = Asm->createTempSymbol("pub" + Name + "_end");
Asm->EmitLabelDifference(EndLabel, BeginLabel, 4);
Asm->OutStreamer->EmitLabel(BeginLabel);
Asm->OutStreamer->AddComment("DWARF Version");
Asm->emitInt16(dwarf::DW_PUBNAMES_VERSION);
Asm->OutStreamer->AddComment("Offset of Compilation Unit Info");
emitSectionReference(*TheU);
Asm->OutStreamer->AddComment("Compilation Unit Length");
Asm->emitInt32(TheU->getLength());
// Emit the pubnames for this compilation unit.
for (const auto &GI : Globals) {
const char *Name = GI.getKeyData();
const DIE *Entity = GI.second;
Asm->OutStreamer->AddComment("DIE offset");
Asm->emitInt32(Entity->getOffset());
if (GnuStyle) {
dwarf::PubIndexEntryDescriptor Desc = computeIndexValue(TheU, Entity);
Asm->OutStreamer->AddComment(
Twine("Kind: ") + dwarf::GDBIndexEntryKindString(Desc.Kind) + ", " +
dwarf::GDBIndexEntryLinkageString(Desc.Linkage));
Asm->emitInt8(Desc.toBits());
}
Asm->OutStreamer->AddComment("External Name");
Asm->OutStreamer->EmitBytes(StringRef(Name, GI.getKeyLength() + 1));
}
Asm->OutStreamer->AddComment("End Mark");
Asm->emitInt32(0);
Asm->OutStreamer->EmitLabel(EndLabel);
}
/// Emit null-terminated strings into a debug str section.
void DwarfDebug::emitDebugStr() {
MCSection *StringOffsetsSection = nullptr;
if (useSegmentedStringOffsetsTable()) {
emitStringOffsetsTableHeader();
StringOffsetsSection = Asm->getObjFileLowering().getDwarfStrOffSection();
}
DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
Holder.emitStrings(Asm->getObjFileLowering().getDwarfStrSection(),
StringOffsetsSection, /* UseRelativeOffsets = */ true);
}
void DwarfDebug::emitDebugLocEntry(ByteStreamer &Streamer,
const DebugLocStream::Entry &Entry) {
auto &&Comments = DebugLocs.getComments(Entry);
auto Comment = Comments.begin();
auto End = Comments.end();
for (uint8_t Byte : DebugLocs.getBytes(Entry))
Streamer.EmitInt8(Byte, Comment != End ? *(Comment++) : "");
}
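// Lower one DebugLocEntry::Value (an integer constant, a machine location,
// or a floating-point constant) into DWARF expression operations.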
static void emitDebugLocValue(const AsmPrinter &AP, const DIBasicType *BT,
const DebugLocEntry::Value &Value,
DwarfExpression &DwarfExpr) {
auto *DIExpr = Value.getExpression();
DIExpressionCursor ExprCursor(DIExpr);
DwarfExpr.addFragmentOffset(DIExpr);
// Regular entry.
if (Value.isInt()) {
if (BT && (BT->getEncoding() == dwarf::DW_ATE_signed ||
BT->getEncoding() == dwarf::DW_ATE_signed_char))
DwarfExpr.addSignedConstant(Value.getInt());
else
DwarfExpr.addUnsignedConstant(Value.getInt());
} else if (Value.isLocation()) {
MachineLocation Location = Value.getLoc();
if (Location.isIndirect())
DwarfExpr.setMemoryLocationKind();
DIExpressionCursor Cursor(DIExpr);
const TargetRegisterInfo &TRI = *AP.MF->getSubtarget().getRegisterInfo();
if (!DwarfExpr.addMachineRegExpression(TRI, Cursor, Location.getReg()))
return;
return DwarfExpr.addExpression(std::move(Cursor));
} else if (Value.isConstantFP()) {
APInt RawBytes = Value.getConstantFP()->getValueAPF().bitcastToAPInt();
DwarfExpr.addUnsignedConstant(RawBytes);
}
DwarfExpr.addExpression(std::move(ExprCursor));
}
void DebugLocEntry::finalize(const AsmPrinter &AP,
DebugLocStream::ListBuilder &List,
const DIBasicType *BT) {
DebugLocStream::EntryBuilder Entry(List, Begin, End);
BufferByteStreamer Streamer = Entry.getStreamer();
DebugLocDwarfExpression DwarfExpr(AP.getDwarfVersion(), Streamer);
const DebugLocEntry::Value &Value = Values[0];
if (Value.isFragment()) {
// Emit all fragments that belong to the same variable and range.
assert(llvm::all_of(Values, [](DebugLocEntry::Value P) {
return P.isFragment();
}) && "all values are expected to be fragments");
assert(std::is_sorted(Values.begin(), Values.end()) &&
"fragments are expected to be sorted");
for (auto Fragment : Values)
emitDebugLocValue(AP, BT, Fragment, DwarfExpr);
} else {
assert(Values.size() == 1 && "only fragments may have >1 value");
emitDebugLocValue(AP, BT, Value, DwarfExpr);
}
DwarfExpr.finalize();
}
void DwarfDebug::emitDebugLocEntryLocation(const DebugLocStream::Entry &Entry) {
// Emit the size.
Asm->OutStreamer->AddComment("Loc expr size");
Asm->emitInt16(DebugLocs.getBytes(Entry).size());
// Emit the entry.
APByteStreamer Streamer(*Asm);
emitDebugLocEntry(Streamer, Entry);
}
// Emit locations into the debug loc section.
void DwarfDebug::emitDebugLoc() {
if (DebugLocs.getLists().empty())
return;
// Start the dwarf loc section.
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfLocSection());
unsigned char Size = Asm->MAI->getCodePointerSize();
for (const auto &List : DebugLocs.getLists()) {
Asm->OutStreamer->EmitLabel(List.Label);
const DwarfCompileUnit *CU = List.CU;
for (const auto &Entry : DebugLocs.getEntries(List)) {
// Set up the range. This range is relative to the entry point of the
// compile unit. This is a hard coded 0 for low_pc when we're emitting
// ranges, or the DW_AT_low_pc on the compile unit otherwise.
if (auto *Base = CU->getBaseAddress()) {
Asm->EmitLabelDifference(Entry.BeginSym, Base, Size);
Asm->EmitLabelDifference(Entry.EndSym, Base, Size);
} else {
Asm->OutStreamer->EmitSymbolValue(Entry.BeginSym, Size);
Asm->OutStreamer->EmitSymbolValue(Entry.EndSym, Size);
}
emitDebugLocEntryLocation(Entry);
}
Asm->OutStreamer->EmitIntValue(0, Size);
Asm->OutStreamer->EmitIntValue(0, Size);
}
}
void DwarfDebug::emitDebugLocDWO() {
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfLocDWOSection());
for (const auto &List : DebugLocs.getLists()) {
Asm->OutStreamer->EmitLabel(List.Label);
for (const auto &Entry : DebugLocs.getEntries(List)) {
// Just always use start_length for now - at least that's one address
// rather than two. We could get fancier and try to, say, reuse an
// address we know we've emitted elsewhere (the start of the function?
// The start of the CU or CU subrange that encloses this range?)
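// Each entry is thus a 1-byte DW_LLE_startx_length opcode, a ULEB128 index
// into the address pool, and a 4-byte length, followed by the location
// description.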
Asm->emitInt8(dwarf::DW_LLE_startx_length);
unsigned idx = AddrPool.getIndex(Entry.BeginSym);
Asm->EmitULEB128(idx);
Asm->EmitLabelDifference(Entry.EndSym, Entry.BeginSym, 4);
emitDebugLocEntryLocation(Entry);
}
Asm->emitInt8(dwarf::DW_LLE_end_of_list);
}
}
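// A span of code addresses attributed to a single CU for .debug_aranges;
// End may be null for sectionless symbols, in which case the symbol's own
// size (from SymSize) is emitted instead.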
struct ArangeSpan {
const MCSymbol *Start, *End;
};
// Emit a debug aranges section, containing a CU lookup for any
// address we can tie back to a CU.
void DwarfDebug::emitDebugARanges() {
// Provides a unique id per text section.
MapVector<MCSection *, SmallVector<SymbolCU, 8>> SectionMap;
// Filter labels by section.
for (const SymbolCU &SCU : ArangeLabels) {
if (SCU.Sym->isInSection()) {
// Make a note of this symbol and its section.
MCSection *Section = &SCU.Sym->getSection();
if (!Section->getKind().isMetadata())
SectionMap[Section].push_back(SCU);
} else {
// Some symbols (e.g. common/bss on mach-o) can have no section but still
// appear in the output. This sucks as we rely on sections to build
// arange spans. We can do it without, but it's icky.
SectionMap[nullptr].push_back(SCU);
}
}
DenseMap<DwarfCompileUnit *, std::vector<ArangeSpan>> Spans;
for (auto &I : SectionMap) {
MCSection *Section = I.first;
SmallVector<SymbolCU, 8> &List = I.second;
if (List.size() < 1)
continue;
// If we have no section (e.g. common), just write out
// individual spans for each symbol.
if (!Section) {
for (const SymbolCU &Cur : List) {
ArangeSpan Span;
Span.Start = Cur.Sym;
Span.End = nullptr;
assert(Cur.CU);
Spans[Cur.CU].push_back(Span);
}
continue;
}
// Sort the symbols by offset within the section.
std::stable_sort(
List.begin(), List.end(), [&](const SymbolCU &A, const SymbolCU &B) {
unsigned IA = A.Sym ? Asm->OutStreamer->GetSymbolOrder(A.Sym) : 0;
unsigned IB = B.Sym ? Asm->OutStreamer->GetSymbolOrder(B.Sym) : 0;
// Symbols with no order assigned should be placed at the end.
// (e.g. section end labels)
if (IA == 0)
return false;
if (IB == 0)
return true;
return IA < IB;
});
// Insert a final terminator.
List.push_back(SymbolCU(nullptr, Asm->OutStreamer->endSection(Section)));
// Build spans between each label.
const MCSymbol *StartSym = List[0].Sym;
for (size_t n = 1, e = List.size(); n < e; n++) {
const SymbolCU &Prev = List[n - 1];
const SymbolCU &Cur = List[n];
// Try and build the longest span we can within the same CU.
if (Cur.CU != Prev.CU) {
ArangeSpan Span;
Span.Start = StartSym;
Span.End = Cur.Sym;
assert(Prev.CU);
Spans[Prev.CU].push_back(Span);
StartSym = Cur.Sym;
}
}
}
// Start the dwarf aranges section.
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfARangesSection());
unsigned PtrSize = Asm->MAI->getCodePointerSize();
// Build a list of CUs used.
std::vector<DwarfCompileUnit *> CUs;
for (const auto &it : Spans) {
DwarfCompileUnit *CU = it.first;
CUs.push_back(CU);
}
// Sort the CU list (again, to ensure consistent output order).
llvm::sort(CUs.begin(), CUs.end(),
[](const DwarfCompileUnit *A, const DwarfCompileUnit *B) {
return A->getUniqueID() < B->getUniqueID();
});
// Emit an arange table for each CU we used.
for (DwarfCompileUnit *CU : CUs) {
std::vector<ArangeSpan> &List = Spans[CU];
// Describe the skeleton CU's offset and length, not the dwo file's.
if (auto *Skel = CU->getSkeleton())
CU = Skel;
// Emit size of content not including length itself.
unsigned ContentSize =
sizeof(int16_t) + // DWARF ARange version number
sizeof(int32_t) + // Offset of CU in the .debug_info section
sizeof(int8_t) + // Pointer Size (in bytes)
sizeof(int8_t); // Segment Size (in bytes)
unsigned TupleSize = PtrSize * 2;
// Section 7.20 in the DWARF spec requires the table to be aligned to a tuple.
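// For example, with 8-byte pointers the tuple size is 16, so the 12 bytes
// emitted so far (the 4-byte length field plus this 8-byte header) take 4
// bytes of 0xff padding before the first tuple.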
unsigned Padding =
OffsetToAlignment(sizeof(int32_t) + ContentSize, TupleSize);
ContentSize += Padding;
ContentSize += (List.size() + 1) * TupleSize;
// For each compile unit, write the list of spans it covers.
Asm->OutStreamer->AddComment("Length of ARange Set");
Asm->emitInt32(ContentSize);
Asm->OutStreamer->AddComment("DWARF Arange version number");
Asm->emitInt16(dwarf::DW_ARANGES_VERSION);
Asm->OutStreamer->AddComment("Offset Into Debug Info Section");
emitSectionReference(*CU);
Asm->OutStreamer->AddComment("Address Size (in bytes)");
Asm->emitInt8(PtrSize);
Asm->OutStreamer->AddComment("Segment Size (in bytes)");
Asm->emitInt8(0);
Asm->OutStreamer->emitFill(Padding, 0xff);
for (const ArangeSpan &Span : List) {
Asm->EmitLabelReference(Span.Start, PtrSize);
// Calculate the size as being from the span start to its end.
if (Span.End) {
Asm->EmitLabelDifference(Span.End, Span.Start, PtrSize);
} else {
// For symbols without an end marker (e.g. common), we
// write a single arange entry containing just that one symbol.
uint64_t Size = SymSize[Span.Start];
if (Size == 0)
Size = 1;
Asm->OutStreamer->EmitIntValue(Size, PtrSize);
}
}
Asm->OutStreamer->AddComment("ARange terminator");
Asm->OutStreamer->EmitIntValue(0, PtrSize);
Asm->OutStreamer->EmitIntValue(0, PtrSize);
}
}
/// Emit a single range list. We handle both DWARF v5 and earlier.
static void emitRangeList(AsmPrinter *Asm, DwarfCompileUnit *CU,
const RangeSpanList &List) {
auto DwarfVersion = CU->getDwarfVersion();
// Emit our symbol so we can find the beginning of the range.
Asm->OutStreamer->EmitLabel(List.getSym());
// Gather all the ranges that apply to the same section so they can share
// a base address entry.
MapVector<const MCSection *, std::vector<const RangeSpan *>> SectionRanges;
// Size for our labels.
auto Size = Asm->MAI->getCodePointerSize();
for (const RangeSpan &Range : List.getRanges())
SectionRanges[&Range.getStart()->getSection()].push_back(&Range);
auto *CUBase = CU->getBaseAddress();
bool BaseIsSet = false;
for (const auto &P : SectionRanges) {
// Don't bother with a base address entry if there's only one range in
// this section in this range list - for example ranges for a CU will
// usually consist of single regions from each of many sections
// (-ffunction-sections, or just C++ inline functions) except under LTO
// or optnone where there may be holes in a single CU's section
// contributions.
auto *Base = CUBase;
if (!Base && P.second.size() > 1 &&
(UseDwarfRangesBaseAddressSpecifier || DwarfVersion >= 5)) {
BaseIsSet = true;
// FIXME/use care: This may not be a useful base address if it's not
// the lowest address/range in this object.
Base = P.second.front()->getStart();
if (DwarfVersion >= 5) {
Asm->OutStreamer->AddComment("DW_RLE_base_address");
Asm->OutStreamer->EmitIntValue(dwarf::DW_RLE_base_address, 1);
} else
Asm->OutStreamer->EmitIntValue(-1, Size);
Asm->OutStreamer->AddComment(" base address");
Asm->OutStreamer->EmitSymbolValue(Base, Size);
} else if (BaseIsSet && DwarfVersion < 5) {
BaseIsSet = false;
assert(!Base);
Asm->OutStreamer->EmitIntValue(-1, Size);
Asm->OutStreamer->EmitIntValue(0, Size);
}
for (const auto *RS : P.second) {
const MCSymbol *Begin = RS->getStart();
const MCSymbol *End = RS->getEnd();
assert(Begin && "Range without a begin symbol?");
assert(End && "Range without an end symbol?");
if (Base) {
if (DwarfVersion >= 5) {
// Emit DW_RLE_offset_pair when we have a base.
Asm->OutStreamer->AddComment("DW_RLE_offset_pair");
Asm->OutStreamer->EmitIntValue(dwarf::DW_RLE_offset_pair, 1);
Asm->OutStreamer->AddComment(" starting offset");
Asm->EmitLabelDifferenceAsULEB128(Begin, Base);
Asm->OutStreamer->AddComment(" ending offset");
Asm->EmitLabelDifferenceAsULEB128(End, Base);
} else {
Asm->EmitLabelDifference(Begin, Base, Size);
Asm->EmitLabelDifference(End, Base, Size);
}
} else if (DwarfVersion >= 5) {
Asm->OutStreamer->AddComment("DW_RLE_start_length");
Asm->OutStreamer->EmitIntValue(dwarf::DW_RLE_start_length, 1);
Asm->OutStreamer->AddComment(" start");
Asm->OutStreamer->EmitSymbolValue(Begin, Size);
Asm->OutStreamer->AddComment(" length");
Asm->EmitLabelDifferenceAsULEB128(End, Begin);
} else {
Asm->OutStreamer->EmitSymbolValue(Begin, Size);
Asm->OutStreamer->EmitSymbolValue(End, Size);
}
}
}
if (DwarfVersion >= 5) {
Asm->OutStreamer->AddComment("DW_RLE_end_of_list");
Asm->OutStreamer->EmitIntValue(dwarf::DW_RLE_end_of_list, 1);
} else {
// Terminate the list with two 0 values.
Asm->OutStreamer->EmitIntValue(0, Size);
Asm->OutStreamer->EmitIntValue(0, Size);
}
}
// Emit the header of a DWARF 5 range list table. Returns the symbol that
// designates the end of the table for the caller to emit when the table is
// complete.
static MCSymbol *emitRnglistsTableHeader(AsmPrinter *Asm, DwarfFile &Holder) {
// The length is described by a starting label right after the length field
// and an end label.
MCSymbol *TableStart = Asm->createTempSymbol("debug_rnglist_table_start");
MCSymbol *TableEnd = Asm->createTempSymbol("debug_rnglist_table_end");
// Build the range table header, which starts with the length field.
Asm->EmitLabelDifference(TableEnd, TableStart, 4);
Asm->OutStreamer->EmitLabel(TableStart);
// Version number (DWARF v5 and later).
Asm->emitInt16(Asm->OutStreamer->getContext().getDwarfVersion());
// Address size.
Asm->emitInt8(Asm->MAI->getCodePointerSize());
// Segment selector size.
Asm->emitInt8(0);
MCSymbol *RnglistTableBaseSym = Holder.getRnglistsTableBaseSym();
// FIXME: Generate the offsets table and use DW_FORM_rnglistx with the
// DW_AT_ranges attribute. Until then set the number of offsets to 0.
Asm->emitInt32(0);
Asm->OutStreamer->EmitLabel(RnglistTableBaseSym);
return TableEnd;
}
/// Emit address ranges into the .debug_ranges section or into the DWARF v5
/// .debug_rnglists section.
void DwarfDebug::emitDebugRanges() {
if (CUMap.empty())
return;
auto NoRangesPresent = [this]() {
return llvm::all_of(
- CUMap, [](const decltype(CUMap)::const_iterator::value_type &Pair) {
+ CUMap, [](const decltype(CUMap)::value_type &Pair) {
return Pair.second->getRangeLists().empty();
});
};
if (!useRangesSection()) {
assert(NoRangesPresent() && "No debug ranges expected.");
return;
}
if (NoRangesPresent())
return;
// Start the dwarf ranges section.
MCSymbol *TableEnd = nullptr;
if (getDwarfVersion() >= 5) {
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfRnglistsSection());
TableEnd = emitRnglistsTableHeader(Asm, useSplitDwarf() ? SkeletonHolder
: InfoHolder);
} else
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfRangesSection());
// Grab the specific ranges for the compile units in the module.
for (const auto &I : CUMap) {
DwarfCompileUnit *TheCU = I.second;
if (auto *Skel = TheCU->getSkeleton())
TheCU = Skel;
// Iterate over the misc ranges for the compile units in the module.
for (const RangeSpanList &List : TheCU->getRangeLists())
emitRangeList(Asm, TheCU, List);
}
if (TableEnd)
Asm->OutStreamer->EmitLabel(TableEnd);
}
void DwarfDebug::handleMacroNodes(DIMacroNodeArray Nodes, DwarfCompileUnit &U) {
for (auto *MN : Nodes) {
if (auto *M = dyn_cast<DIMacro>(MN))
emitMacro(*M);
else if (auto *F = dyn_cast<DIMacroFile>(MN))
emitMacroFile(*F, U);
else
llvm_unreachable("Unexpected DI type!");
}
}
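// Emit one macro entry. For example, '#define FOO 1' on line 3 comes out as
// ULEB128 DW_MACINFO_define, ULEB128 3, then the bytes "FOO 1" and a
// terminating NUL (illustrative, assuming the DIMacro carries that name and
// value).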
void DwarfDebug::emitMacro(DIMacro &M) {
Asm->EmitULEB128(M.getMacinfoType());
Asm->EmitULEB128(M.getLine());
StringRef Name = M.getName();
StringRef Value = M.getValue();
Asm->OutStreamer->EmitBytes(Name);
if (!Value.empty()) {
// There should be one space between macro name and macro value.
Asm->emitInt8(' ');
Asm->OutStreamer->EmitBytes(Value);
}
Asm->emitInt8('\0');
}
void DwarfDebug::emitMacroFile(DIMacroFile &F, DwarfCompileUnit &U) {
assert(F.getMacinfoType() == dwarf::DW_MACINFO_start_file);
Asm->EmitULEB128(dwarf::DW_MACINFO_start_file);
Asm->EmitULEB128(F.getLine());
Asm->EmitULEB128(U.getOrCreateSourceID(F.getFile()));
handleMacroNodes(F.getElements(), U);
Asm->EmitULEB128(dwarf::DW_MACINFO_end_file);
}
/// Emit macros into a debug macinfo section.
void DwarfDebug::emitDebugMacinfo() {
if (CUMap.empty())
return;
// Start the dwarf macinfo section.
Asm->OutStreamer->SwitchSection(
Asm->getObjFileLowering().getDwarfMacinfoSection());
for (const auto &P : CUMap) {
auto &TheCU = *P.second;
auto *SkCU = TheCU.getSkeleton();
DwarfCompileUnit &U = SkCU ? *SkCU : TheCU;
auto *CUNode = cast<DICompileUnit>(P.first);
DIMacroNodeArray Macros = CUNode->getMacros();
if (!Macros.empty()) {
Asm->OutStreamer->EmitLabel(U.getMacroLabelBegin());
handleMacroNodes(Macros, U);
}
}
Asm->OutStreamer->AddComment("End Of Macro List Mark");
Asm->emitInt8(0);
}
// DWARF5 Experimental Separate Dwarf emitters.
void DwarfDebug::initSkeletonUnit(const DwarfUnit &U, DIE &Die,
std::unique_ptr<DwarfCompileUnit> NewU) {
NewU->addString(Die, dwarf::DW_AT_GNU_dwo_name,
Asm->TM.Options.MCOptions.SplitDwarfFile);
if (!CompilationDir.empty())
NewU->addString(Die, dwarf::DW_AT_comp_dir, CompilationDir);
addGnuPubAttributes(*NewU, Die);
SkeletonHolder.addUnit(std::move(NewU));
}
DwarfCompileUnit &DwarfDebug::constructSkeletonCU(const DwarfCompileUnit &CU) {
auto OwnedUnit = llvm::make_unique<DwarfCompileUnit>(
CU.getUniqueID(), CU.getCUNode(), Asm, this, &SkeletonHolder);
DwarfCompileUnit &NewCU = *OwnedUnit;
NewCU.setSection(Asm->getObjFileLowering().getDwarfInfoSection());
NewCU.initStmtList();
if (useSegmentedStringOffsetsTable())
NewCU.addStringOffsetsStart();
initSkeletonUnit(CU, NewCU.getUnitDie(), std::move(OwnedUnit));
return NewCU;
}
// Emit the .debug_info.dwo section for separated dwarf. This contains the
// compile units that would normally be in debug_info.
void DwarfDebug::emitDebugInfoDWO() {
assert(useSplitDwarf() && "No split dwarf debug info?");
// Don't emit relocations into the dwo file.
InfoHolder.emitUnits(/* UseOffsets */ true);
}
// Emit the .debug_abbrev.dwo section for separated dwarf. This contains the
// abbreviations for the .debug_info.dwo section.
void DwarfDebug::emitDebugAbbrevDWO() {
assert(useSplitDwarf() && "No split dwarf?");
InfoHolder.emitAbbrevs(Asm->getObjFileLowering().getDwarfAbbrevDWOSection());
}
void DwarfDebug::emitDebugLineDWO() {
assert(useSplitDwarf() && "No split dwarf?");
SplitTypeUnitFileTable.Emit(
*Asm->OutStreamer, MCDwarfLineTableParams(),
Asm->getObjFileLowering().getDwarfLineDWOSection());
}
void DwarfDebug::emitStringOffsetsTableHeaderDWO() {
assert(useSplitDwarf() && "No split dwarf?");
InfoHolder.getStringPool().emitStringOffsetsTableHeader(
*Asm, Asm->getObjFileLowering().getDwarfStrOffDWOSection(),
InfoHolder.getStringOffsetsStartSym());
}
// Emit the .debug_str.dwo section for separated dwarf. This contains the
// string section and is identical in format to traditional .debug_str
// sections.
void DwarfDebug::emitDebugStrDWO() {
if (useSegmentedStringOffsetsTable())
emitStringOffsetsTableHeaderDWO();
assert(useSplitDwarf() && "No split dwarf?");
MCSection *OffSec = Asm->getObjFileLowering().getDwarfStrOffDWOSection();
InfoHolder.emitStrings(Asm->getObjFileLowering().getDwarfStrDWOSection(),
OffSec, /* UseRelativeOffsets = */ false);
}
// Emit DWO addresses.
void DwarfDebug::emitDebugAddr() {
assert(useSplitDwarf() && "No split dwarf?");
AddrPool.emit(*Asm, Asm->getObjFileLowering().getDwarfAddrSection());
}
MCDwarfDwoLineTable *DwarfDebug::getDwoLineTable(const DwarfCompileUnit &CU) {
if (!useSplitDwarf())
return nullptr;
const DICompileUnit *DIUnit = CU.getCUNode();
SplitTypeUnitFileTable.maybeSetRootFile(
DIUnit->getDirectory(), DIUnit->getFilename(),
CU.getMD5AsBytes(DIUnit->getFile()), DIUnit->getSource());
return &SplitTypeUnitFileTable;
}
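// Compute the 8-byte signature that names a DWARF type unit: the least
// significant 8 bytes of the MD5 hash of the type's unique identifier.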
uint64_t DwarfDebug::makeTypeSignature(StringRef Identifier) {
MD5 Hash;
Hash.update(Identifier);
// ... take the least significant 8 bytes and return those. Our MD5
// implementation always returns its results in little endian, so we actually
// need the "high" word.
MD5::MD5Result Result;
Hash.final(Result);
return Result.high();
}
void DwarfDebug::addDwarfTypeUnitType(DwarfCompileUnit &CU,
StringRef Identifier, DIE &RefDie,
const DICompositeType *CTy) {
// Fast path: if we're building some type units and one has already used the
// address pool, we know we're going to throw away all this work anyway, so
// don't bother building dependent types.
if (!TypeUnitsUnderConstruction.empty() && AddrPool.hasBeenUsed())
return;
auto Ins = TypeSignatures.insert(std::make_pair(CTy, 0));
if (!Ins.second) {
CU.addDIETypeSignature(RefDie, Ins.first->second);
return;
}
bool TopLevelType = TypeUnitsUnderConstruction.empty();
AddrPool.resetUsedFlag();
auto OwnedUnit = llvm::make_unique<DwarfTypeUnit>(CU, Asm, this, &InfoHolder,
getDwoLineTable(CU));
DwarfTypeUnit &NewTU = *OwnedUnit;
DIE &UnitDie = NewTU.getUnitDie();
TypeUnitsUnderConstruction.emplace_back(std::move(OwnedUnit), CTy);
NewTU.addUInt(UnitDie, dwarf::DW_AT_language, dwarf::DW_FORM_data2,
CU.getLanguage());
uint64_t Signature = makeTypeSignature(Identifier);
NewTU.setTypeSignature(Signature);
Ins.first->second = Signature;
if (useSplitDwarf())
NewTU.setSection(Asm->getObjFileLowering().getDwarfTypesDWOSection());
else {
NewTU.setSection(Asm->getObjFileLowering().getDwarfTypesSection(Signature));
// Non-split type units reuse the compile unit's line table.
CU.applyStmtList(UnitDie);
}
// Add DW_AT_str_offsets_base to the type unit DIE, but not for split type
// units.
if (useSegmentedStringOffsetsTable() && !useSplitDwarf())
NewTU.addStringOffsetsStart();
NewTU.setType(NewTU.createTypeDIE(CTy));
if (TopLevelType) {
auto TypeUnitsToAdd = std::move(TypeUnitsUnderConstruction);
TypeUnitsUnderConstruction.clear();
// Types referencing entries in the address table cannot be placed in type
// units.
if (AddrPool.hasBeenUsed()) {
// Remove all the types built while building this type.
// This is pessimistic as some of these types might not be dependent on
// the type that used an address.
for (const auto &TU : TypeUnitsToAdd)
TypeSignatures.erase(TU.second);
// Construct this type in the CU directly.
// This is inefficient because all the dependent types will be rebuilt
// from scratch, including building them in type units, discovering that
// they depend on addresses, throwing them out and rebuilding them.
CU.constructTypeDIE(RefDie, cast<DICompositeType>(CTy));
return;
}
// If the type wasn't dependent on fission addresses, finish adding the type
// and all its dependent types.
for (auto &TU : TypeUnitsToAdd) {
InfoHolder.computeSizeAndOffsetsForUnit(TU.first.get());
InfoHolder.emitUnit(TU.first.get(), useSplitDwarf());
}
}
CU.addDIETypeSignature(RefDie, Signature);
}
// Add the Name along with its companion DIE to the appropriate accelerator
// table (for AccelTableKind::Dwarf it's always AccelDebugNames, for
// AccelTableKind::Apple, we use the table we got as an argument). If
// accelerator tables are disabled, this function does nothing.
template <typename DataT>
void DwarfDebug::addAccelNameImpl(AccelTable<DataT> &AppleAccel, StringRef Name,
const DIE &Die) {
if (getAccelTableKind() == AccelTableKind::None)
return;
DwarfFile &Holder = useSplitDwarf() ? SkeletonHolder : InfoHolder;
DwarfStringPoolEntryRef Ref =
Holder.getStringPool().getEntry(*Asm, Name);
switch (getAccelTableKind()) {
case AccelTableKind::Apple:
AppleAccel.addName(Ref, Die);
break;
case AccelTableKind::Dwarf:
AccelDebugNames.addName(Ref, Die);
break;
case AccelTableKind::Default:
llvm_unreachable("Default should have already been resolved.");
case AccelTableKind::None:
llvm_unreachable("None handled above");
}
}
void DwarfDebug::addAccelName(StringRef Name, const DIE &Die) {
addAccelNameImpl(AccelNames, Name, Die);
}
void DwarfDebug::addAccelObjC(StringRef Name, const DIE &Die) {
// ObjC names go only into the Apple accelerator tables.
if (getAccelTableKind() == AccelTableKind::Apple)
addAccelNameImpl(AccelObjC, Name, Die);
}
void DwarfDebug::addAccelNamespace(StringRef Name, const DIE &Die) {
addAccelNameImpl(AccelNamespace, Name, Die);
}
void DwarfDebug::addAccelType(StringRef Name, const DIE &Die, char Flags) {
addAccelNameImpl(AccelTypes, Name, Die);
}
uint16_t DwarfDebug::getDwarfVersion() const {
return Asm->OutStreamer->getContext().getDwarfVersion();
}
Index: projects/clang700-import/contrib/llvm/lib/Target/X86/X86DomainReassignment.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/Target/X86/X86DomainReassignment.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/Target/X86/X86DomainReassignment.cpp (revision 340125)
@@ -1,779 +1,800 @@
//===--- X86DomainReassignment.cpp - Selectively switch register classes---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass attempts to find instruction chains (closures) in one domain,
// and convert them to equivalent instructions in a different domain,
// if profitable.
//
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Printable.h"
#include <bitset>
using namespace llvm;
namespace llvm {
void initializeX86DomainReassignmentPass(PassRegistry &);
}
#define DEBUG_TYPE "x86-domain-reassignment"
STATISTIC(NumClosuresConverted, "Number of closures converted by the pass");
static cl::opt<bool> DisableX86DomainReassignment(
"disable-x86-domain-reassignment", cl::Hidden,
cl::desc("X86: Disable Virtual Register Reassignment."), cl::init(false));
namespace {
enum RegDomain { NoDomain = -1, GPRDomain, MaskDomain, OtherDomain, NumDomains };
static bool isGPR(const TargetRegisterClass *RC) {
return X86::GR64RegClass.hasSubClassEq(RC) ||
X86::GR32RegClass.hasSubClassEq(RC) ||
X86::GR16RegClass.hasSubClassEq(RC) ||
X86::GR8RegClass.hasSubClassEq(RC);
}
static bool isMask(const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) {
return X86::VK16RegClass.hasSubClassEq(RC);
}
static RegDomain getDomain(const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) {
if (isGPR(RC))
return GPRDomain;
if (isMask(RC, TRI))
return MaskDomain;
return OtherDomain;
}
/// Return a register class equivalent to \p SrcRC, in \p Domain.
static const TargetRegisterClass *getDstRC(const TargetRegisterClass *SrcRC,
RegDomain Domain) {
assert(Domain == MaskDomain && "add domain");
if (X86::GR8RegClass.hasSubClassEq(SrcRC))
return &X86::VK8RegClass;
if (X86::GR16RegClass.hasSubClassEq(SrcRC))
return &X86::VK16RegClass;
if (X86::GR32RegClass.hasSubClassEq(SrcRC))
return &X86::VK32RegClass;
if (X86::GR64RegClass.hasSubClassEq(SrcRC))
return &X86::VK64RegClass;
llvm_unreachable("add register class");
return nullptr;
}
/// Abstract Instruction Converter class.
class InstrConverterBase {
protected:
unsigned SrcOpcode;
public:
InstrConverterBase(unsigned SrcOpcode) : SrcOpcode(SrcOpcode) {}
virtual ~InstrConverterBase() {}
/// \returns true if \p MI is legal to convert.
virtual bool isLegal(const MachineInstr *MI,
const TargetInstrInfo *TII) const {
assert(MI->getOpcode() == SrcOpcode &&
"Wrong instruction passed to converter");
return true;
}
/// Applies conversion to \p MI.
///
/// \returns true if \p MI is no longer needed and can be deleted.
virtual bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
MachineRegisterInfo *MRI) const = 0;
/// \returns the cost increment incurred by converting \p MI.
virtual double getExtraCost(const MachineInstr *MI,
MachineRegisterInfo *MRI) const = 0;
};
/// An Instruction Converter which ignores the given instruction.
/// For example, PHI instructions can be safely ignored since only the registers
/// need to change.
class InstrIgnore : public InstrConverterBase {
public:
InstrIgnore(unsigned SrcOpcode) : InstrConverterBase(SrcOpcode) {}
bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
MachineRegisterInfo *MRI) const override {
assert(isLegal(MI, TII) && "Cannot convert instruction");
return false;
}
double getExtraCost(const MachineInstr *MI,
MachineRegisterInfo *MRI) const override {
return 0;
}
};
/// An Instruction Converter which replaces an instruction with another.
class InstrReplacer : public InstrConverterBase {
public:
/// Opcode of the destination instruction.
unsigned DstOpcode;
InstrReplacer(unsigned SrcOpcode, unsigned DstOpcode)
: InstrConverterBase(SrcOpcode), DstOpcode(DstOpcode) {}
bool isLegal(const MachineInstr *MI,
const TargetInstrInfo *TII) const override {
if (!InstrConverterBase::isLegal(MI, TII))
return false;
// It's illegal to replace an instruction that implicitly defines a register
// with an instruction that doesn't, unless that register is dead.
for (auto &MO : MI->implicit_operands())
if (MO.isReg() && MO.isDef() && !MO.isDead() &&
!TII->get(DstOpcode).hasImplicitDefOfPhysReg(MO.getReg()))
return false;
return true;
}
bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
MachineRegisterInfo *MRI) const override {
assert(isLegal(MI, TII) && "Cannot convert instruction");
MachineInstrBuilder Bld =
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(DstOpcode));
// Transfer explicit operands from original instruction. Implicit operands
// are handled by BuildMI.
for (auto &Op : MI->explicit_operands())
Bld.add(Op);
return true;
}
double getExtraCost(const MachineInstr *MI,
MachineRegisterInfo *MRI) const override {
// Assuming instructions have the same cost.
return 0;
}
};
/// An Instruction Converter which replaces an instruction with another, and
/// adds a COPY from the new instruction's destination to the old one's.
class InstrReplacerDstCOPY : public InstrConverterBase {
public:
unsigned DstOpcode;
InstrReplacerDstCOPY(unsigned SrcOpcode, unsigned DstOpcode)
: InstrConverterBase(SrcOpcode), DstOpcode(DstOpcode) {}
bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
MachineRegisterInfo *MRI) const override {
assert(isLegal(MI, TII) && "Cannot convert instruction");
MachineBasicBlock *MBB = MI->getParent();
auto &DL = MI->getDebugLoc();
unsigned Reg = MRI->createVirtualRegister(
TII->getRegClass(TII->get(DstOpcode), 0, MRI->getTargetRegisterInfo(),
*MBB->getParent()));
MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(DstOpcode), Reg);
for (unsigned Idx = 1, End = MI->getNumOperands(); Idx < End; ++Idx)
Bld.add(MI->getOperand(Idx));
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY))
.add(MI->getOperand(0))
.addReg(Reg);
return true;
}
double getExtraCost(const MachineInstr *MI,
MachineRegisterInfo *MRI) const override {
// Assuming instructions have the same cost, and that COPY is in the same
// domain so it will be eliminated.
return 0;
}
};
/// An Instruction Converter for replacing COPY instructions.
class InstrCOPYReplacer : public InstrReplacer {
public:
RegDomain DstDomain;
InstrCOPYReplacer(unsigned SrcOpcode, RegDomain DstDomain, unsigned DstOpcode)
: InstrReplacer(SrcOpcode, DstOpcode), DstDomain(DstDomain) {}
+ bool isLegal(const MachineInstr *MI,
+ const TargetInstrInfo *TII) const override {
+ if (!InstrConverterBase::isLegal(MI, TII))
+ return false;
+
+ // Don't allow copies to/from GR8/GR16 physical registers.
+ // FIXME: Is there some better way to support this?
+ unsigned DstReg = MI->getOperand(0).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(DstReg) &&
+ (X86::GR8RegClass.contains(DstReg) ||
+ X86::GR16RegClass.contains(DstReg)))
+ return false;
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
+ (X86::GR8RegClass.contains(SrcReg) ||
+ X86::GR16RegClass.contains(SrcReg)))
+ return false;
+
+ return true;
+ }
+
double getExtraCost(const MachineInstr *MI,
MachineRegisterInfo *MRI) const override {
assert(MI->getOpcode() == TargetOpcode::COPY && "Expected a COPY");
for (auto &MO : MI->operands()) {
// Physical registers will not be converted. Assume that converting the
// COPY to the destination domain will eventually result in an actual
// instruction.
if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
return 1;
RegDomain OpDomain = getDomain(MRI->getRegClass(MO.getReg()),
MRI->getTargetRegisterInfo());
// Converting a cross domain COPY to a same domain COPY should eliminate
// an instruction.
if (OpDomain == DstDomain)
return -1;
}
return 0;
}
};
/// An Instruction Converter which replaces an instruction with a COPY.
class InstrReplaceWithCopy : public InstrConverterBase {
public:
// Source instruction operand Index, to be used as the COPY source.
unsigned SrcOpIdx;
InstrReplaceWithCopy(unsigned SrcOpcode, unsigned SrcOpIdx)
: InstrConverterBase(SrcOpcode), SrcOpIdx(SrcOpIdx) {}
bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
MachineRegisterInfo *MRI) const override {
assert(isLegal(MI, TII) && "Cannot convert instruction");
BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
TII->get(TargetOpcode::COPY))
.add({MI->getOperand(0), MI->getOperand(SrcOpIdx)});
return true;
}
double getExtraCost(const MachineInstr *MI,
MachineRegisterInfo *MRI) const override {
return 0;
}
};
// Key type to be used by the Instruction Converters map.
// A converter is identified by <destination domain, source opcode>
typedef std::pair<int, unsigned> InstrConverterBaseKeyTy;
typedef DenseMap<InstrConverterBaseKeyTy, InstrConverterBase *>
InstrConverterBaseMap;
/// A closure is a set of virtual registers representing all of the edges in
/// the closure, as well as all of the instructions connected by those edges.
///
/// A closure may encompass virtual registers in the same register bank that
/// have different widths. For example, it may contain 32-bit GPRs as well as
/// 64-bit GPRs.
///
/// A closure that computes an address (i.e. defines a virtual register that is
/// used in a memory operand) excludes the instructions that contain memory
/// operands using the address. Such an instruction will be included in a
/// different closure that manipulates the loaded or stored value.
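///
/// Illustrative sketch (not drawn from real code): in the chain
///   %b = NOT %a
///   %c = AND %b, %a
/// the registers %a, %b and %c are the closure's edges and the NOT and AND
/// are its instructions.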
class Closure {
private:
/// Virtual registers in the closure.
DenseSet<unsigned> Edges;
/// Instructions in the closure.
SmallVector<MachineInstr *, 8> Instrs;
/// Domains which this closure can legally be reassigned to.
std::bitset<NumDomains> LegalDstDomains;
/// An ID to uniquely identify this closure, even when it gets
/// moved around.
unsigned ID;
public:
Closure(unsigned ID, std::initializer_list<RegDomain> LegalDstDomainList) : ID(ID) {
for (RegDomain D : LegalDstDomainList)
LegalDstDomains.set(D);
}
/// Mark this closure as illegal for reassignment to all domains.
void setAllIllegal() { LegalDstDomains.reset(); }
/// \returns true if this closure has domains which are legal to reassign to.
bool hasLegalDstDomain() const { return LegalDstDomains.any(); }
/// \returns true if is legal to reassign this closure to domain \p RD.
bool isLegal(RegDomain RD) const { return LegalDstDomains[RD]; }
/// Mark this closure as illegal for reassignment to domain \p RD.
void setIllegal(RegDomain RD) { LegalDstDomains[RD] = false; }
bool empty() const { return Edges.empty(); }
bool insertEdge(unsigned Reg) {
return Edges.insert(Reg).second;
}
using const_edge_iterator = DenseSet<unsigned>::const_iterator;
iterator_range<const_edge_iterator> edges() const {
return iterator_range<const_edge_iterator>(Edges.begin(), Edges.end());
}
void addInstruction(MachineInstr *I) {
Instrs.push_back(I);
}
ArrayRef<MachineInstr *> instructions() const {
return Instrs;
}
LLVM_DUMP_METHOD void dump(const MachineRegisterInfo *MRI) const {
dbgs() << "Registers: ";
bool First = true;
for (unsigned Reg : Edges) {
if (!First)
dbgs() << ", ";
First = false;
dbgs() << printReg(Reg, MRI->getTargetRegisterInfo(), 0, MRI);
}
dbgs() << "\n" << "Instructions:";
for (MachineInstr *MI : Instrs) {
dbgs() << "\n ";
MI->print(dbgs());
}
dbgs() << "\n";
}
unsigned getID() const {
return ID;
}
};
class X86DomainReassignment : public MachineFunctionPass {
const X86Subtarget *STI;
MachineRegisterInfo *MRI;
const X86InstrInfo *TII;
/// All edges that are included in some closure
DenseSet<unsigned> EnclosedEdges;
/// All instructions that are included in some closure.
DenseMap<MachineInstr *, unsigned> EnclosedInstrs;
public:
static char ID;
X86DomainReassignment() : MachineFunctionPass(ID) {
initializeX86DomainReassignmentPass(*PassRegistry::getPassRegistry());
}
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
StringRef getPassName() const override {
return "X86 Domain Reassignment Pass";
}
private:
/// A map of available Instruction Converters.
InstrConverterBaseMap Converters;
/// Initialize Converters map.
void initConverters();
/// Starting from \p Reg, expand the closure as much as possible.
void buildClosure(Closure &, unsigned Reg);
/// Enqueue \p Reg to be considered for addition to the closure.
void visitRegister(Closure &, unsigned Reg, RegDomain &Domain,
SmallVectorImpl<unsigned> &Worklist);
/// Reassign the closure to \p Domain.
void reassign(const Closure &C, RegDomain Domain) const;
/// Add \p MI to the closure.
void encloseInstr(Closure &C, MachineInstr *MI);
/// \returns true if it is profitable to reassign the closure to \p Domain.
bool isReassignmentProfitable(const Closure &C, RegDomain Domain) const;
/// Calculate the total cost of reassigning the closure to \p Domain.
double calculateCost(const Closure &C, RegDomain Domain) const;
};
char X86DomainReassignment::ID = 0;
} // End anonymous namespace.
void X86DomainReassignment::visitRegister(Closure &C, unsigned Reg,
RegDomain &Domain,
SmallVectorImpl<unsigned> &Worklist) {
if (EnclosedEdges.count(Reg))
return;
if (!TargetRegisterInfo::isVirtualRegister(Reg))
return;
if (!MRI->hasOneDef(Reg))
return;
RegDomain RD = getDomain(MRI->getRegClass(Reg), MRI->getTargetRegisterInfo());
// First edge in closure sets the domain.
if (Domain == NoDomain)
Domain = RD;
if (Domain != RD)
return;
Worklist.push_back(Reg);
}
void X86DomainReassignment::encloseInstr(Closure &C, MachineInstr *MI) {
auto I = EnclosedInstrs.find(MI);
if (I != EnclosedInstrs.end()) {
if (I->second != C.getID())
// Instruction already belongs to another closure, avoid conflicts between
// closures and mark this closure as illegal.
C.setAllIllegal();
return;
}
EnclosedInstrs[MI] = C.getID();
C.addInstruction(MI);
// Mark the closure as illegal for reassignment to a domain if there is no
// converter for the instruction in that domain, or if the converter cannot
// convert the instruction.
for (int i = 0; i != NumDomains; ++i) {
if (C.isLegal((RegDomain)i)) {
InstrConverterBase *IC = Converters.lookup({i, MI->getOpcode()});
if (!IC || !IC->isLegal(MI, TII))
C.setIllegal((RegDomain)i);
}
}
}
double X86DomainReassignment::calculateCost(const Closure &C,
RegDomain DstDomain) const {
assert(C.isLegal(DstDomain) && "Cannot calculate cost for illegal closure");
double Cost = 0.0;
for (auto *MI : C.instructions())
Cost +=
Converters.lookup({DstDomain, MI->getOpcode()})->getExtraCost(MI, MRI);
return Cost;
}
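// A negative total cost means the conversions remove more cross-domain
// copies than they add. For example (hypothetical converter costs), a
// closure whose GPR<->mask COPY converter reports -1.0 and whose two
// replacers report 0.0 each sums to -1.0 and is therefore worth reassigning.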
bool X86DomainReassignment::isReassignmentProfitable(const Closure &C,
RegDomain Domain) const {
return calculateCost(C, Domain) < 0.0;
}
void X86DomainReassignment::reassign(const Closure &C, RegDomain Domain) const {
assert(C.isLegal(Domain) && "Cannot convert illegal closure");
// Iterate all instructions in the closure, convert each one using the
// appropriate converter.
SmallVector<MachineInstr *, 8> ToErase;
for (auto *MI : C.instructions())
if (Converters.lookup({Domain, MI->getOpcode()})
->convertInstr(MI, TII, MRI))
ToErase.push_back(MI);
// Iterate all registers in the closure, replace them with registers in the
// destination domain.
for (unsigned Reg : C.edges()) {
MRI->setRegClass(Reg, getDstRC(MRI->getRegClass(Reg), Domain));
for (auto &MO : MRI->use_operands(Reg)) {
if (MO.isReg())
// Remove all subregister references as they are not valid in the
// destination domain.
MO.setSubReg(0);
}
}
for (auto MI : ToErase)
MI->eraseFromParent();
}
/// \returns true when \p Reg is used as part of an address calculation in \p
/// MI.
static bool usedAsAddr(const MachineInstr &MI, unsigned Reg,
const TargetInstrInfo *TII) {
if (!MI.mayLoadOrStore())
return false;
const MCInstrDesc &Desc = TII->get(MI.getOpcode());
int MemOpStart = X86II::getMemoryOperandNo(Desc.TSFlags);
if (MemOpStart == -1)
return false;
MemOpStart += X86II::getOperandBias(Desc);
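// A memory reference occupies X86::AddrNumOperands (five) consecutive
// operands: base register, scale amount, index register, displacement and
// segment register; scan those operands for \p Reg.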
for (unsigned MemOpIdx = MemOpStart,
MemOpEnd = MemOpStart + X86::AddrNumOperands;
MemOpIdx < MemOpEnd; ++MemOpIdx) {
auto &Op = MI.getOperand(MemOpIdx);
if (Op.isReg() && Op.getReg() == Reg)
return true;
}
return false;
}
void X86DomainReassignment::buildClosure(Closure &C, unsigned Reg) {
SmallVector<unsigned, 4> Worklist;
RegDomain Domain = NoDomain;
visitRegister(C, Reg, Domain, Worklist);
while (!Worklist.empty()) {
unsigned CurReg = Worklist.pop_back_val();
// Register already in this closure.
if (!C.insertEdge(CurReg))
continue;
MachineInstr *DefMI = MRI->getVRegDef(CurReg);
encloseInstr(C, DefMI);
// Add the registers used by the defining MI to the worklist.
// Do not add registers which are used in address calculation; they will be
// added to a different closure.
int OpEnd = DefMI->getNumOperands();
const MCInstrDesc &Desc = DefMI->getDesc();
int MemOp = X86II::getMemoryOperandNo(Desc.TSFlags);
if (MemOp != -1)
MemOp += X86II::getOperandBias(Desc);
for (int OpIdx = 0; OpIdx < OpEnd; ++OpIdx) {
if (OpIdx == MemOp) {
// Skip address calculation.
OpIdx += (X86::AddrNumOperands - 1);
continue;
}
auto &Op = DefMI->getOperand(OpIdx);
if (!Op.isReg() || !Op.isUse())
continue;
visitRegister(C, Op.getReg(), Domain, Worklist);
}
// Expand closure through register uses.
for (auto &UseMI : MRI->use_nodbg_instructions(CurReg)) {
// We would like to avoid converting closures which calculate addresses,
// as these should remain in GPRs.
if (usedAsAddr(UseMI, CurReg, TII)) {
C.setAllIllegal();
continue;
}
encloseInstr(C, &UseMI);
for (auto &DefOp : UseMI.defs()) {
if (!DefOp.isReg())
continue;
unsigned DefReg = DefOp.getReg();
if (!TargetRegisterInfo::isVirtualRegister(DefReg)) {
C.setAllIllegal();
continue;
}
visitRegister(C, DefReg, Domain, Worklist);
}
}
}
}
void X86DomainReassignment::initConverters() {
Converters[{MaskDomain, TargetOpcode::PHI}] =
new InstrIgnore(TargetOpcode::PHI);
Converters[{MaskDomain, TargetOpcode::IMPLICIT_DEF}] =
new InstrIgnore(TargetOpcode::IMPLICIT_DEF);
Converters[{MaskDomain, TargetOpcode::INSERT_SUBREG}] =
new InstrReplaceWithCopy(TargetOpcode::INSERT_SUBREG, 2);
Converters[{MaskDomain, TargetOpcode::COPY}] =
new InstrCOPYReplacer(TargetOpcode::COPY, MaskDomain, TargetOpcode::COPY);
auto createReplacerDstCOPY = [&](unsigned From, unsigned To) {
Converters[{MaskDomain, From}] = new InstrReplacerDstCOPY(From, To);
};
createReplacerDstCOPY(X86::MOVZX32rm16, X86::KMOVWkm);
createReplacerDstCOPY(X86::MOVZX64rm16, X86::KMOVWkm);
createReplacerDstCOPY(X86::MOVZX32rr16, X86::KMOVWkk);
createReplacerDstCOPY(X86::MOVZX64rr16, X86::KMOVWkk);
if (STI->hasDQI()) {
createReplacerDstCOPY(X86::MOVZX16rm8, X86::KMOVBkm);
createReplacerDstCOPY(X86::MOVZX32rm8, X86::KMOVBkm);
createReplacerDstCOPY(X86::MOVZX64rm8, X86::KMOVBkm);
createReplacerDstCOPY(X86::MOVZX16rr8, X86::KMOVBkk);
createReplacerDstCOPY(X86::MOVZX32rr8, X86::KMOVBkk);
createReplacerDstCOPY(X86::MOVZX64rr8, X86::KMOVBkk);
}
auto createReplacer = [&](unsigned From, unsigned To) {
Converters[{MaskDomain, From}] = new InstrReplacer(From, To);
};
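// For example, with the AND32rr entry registered below, reassigning a
// closure to the mask domain rewrites (hypothetical MIR, for illustration
// only):
//   %2:gr32 = AND32rr %1, %0
// into
//   %2:vk32 = KANDDrr %1, %0
// once the register classes of %0-%2 are switched to mask registers.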
createReplacer(X86::MOV16rm, X86::KMOVWkm);
createReplacer(X86::MOV16mr, X86::KMOVWmk);
createReplacer(X86::MOV16rr, X86::KMOVWkk);
createReplacer(X86::SHR16ri, X86::KSHIFTRWri);
createReplacer(X86::SHL16ri, X86::KSHIFTLWri);
createReplacer(X86::NOT16r, X86::KNOTWrr);
createReplacer(X86::OR16rr, X86::KORWrr);
createReplacer(X86::AND16rr, X86::KANDWrr);
createReplacer(X86::XOR16rr, X86::KXORWrr);
if (STI->hasBWI()) {
createReplacer(X86::MOV32rm, X86::KMOVDkm);
createReplacer(X86::MOV64rm, X86::KMOVQkm);
createReplacer(X86::MOV32mr, X86::KMOVDmk);
createReplacer(X86::MOV64mr, X86::KMOVQmk);
createReplacer(X86::MOV32rr, X86::KMOVDkk);
createReplacer(X86::MOV64rr, X86::KMOVQkk);
createReplacer(X86::SHR32ri, X86::KSHIFTRDri);
createReplacer(X86::SHR64ri, X86::KSHIFTRQri);
createReplacer(X86::SHL32ri, X86::KSHIFTLDri);
createReplacer(X86::SHL64ri, X86::KSHIFTLQri);
createReplacer(X86::ADD32rr, X86::KADDDrr);
createReplacer(X86::ADD64rr, X86::KADDQrr);
createReplacer(X86::NOT32r, X86::KNOTDrr);
createReplacer(X86::NOT64r, X86::KNOTQrr);
createReplacer(X86::OR32rr, X86::KORDrr);
createReplacer(X86::OR64rr, X86::KORQrr);
createReplacer(X86::AND32rr, X86::KANDDrr);
createReplacer(X86::AND64rr, X86::KANDQrr);
createReplacer(X86::ANDN32rr, X86::KANDNDrr);
createReplacer(X86::ANDN64rr, X86::KANDNQrr);
createReplacer(X86::XOR32rr, X86::KXORDrr);
createReplacer(X86::XOR64rr, X86::KXORQrr);
// TODO: KTEST is not a replacement for TEST due to flag differences. Need
// to prove only Z flag is used.
//createReplacer(X86::TEST32rr, X86::KTESTDrr);
//createReplacer(X86::TEST64rr, X86::KTESTQrr);
}
if (STI->hasDQI()) {
createReplacer(X86::ADD8rr, X86::KADDBrr);
createReplacer(X86::ADD16rr, X86::KADDWrr);
createReplacer(X86::AND8rr, X86::KANDBrr);
createReplacer(X86::MOV8rm, X86::KMOVBkm);
createReplacer(X86::MOV8mr, X86::KMOVBmk);
createReplacer(X86::MOV8rr, X86::KMOVBkk);
createReplacer(X86::NOT8r, X86::KNOTBrr);
createReplacer(X86::OR8rr, X86::KORBrr);
createReplacer(X86::SHR8ri, X86::KSHIFTRBri);
createReplacer(X86::SHL8ri, X86::KSHIFTLBri);
// TODO: KTEST is not a replacement for TEST due to flag differences. Need
// to prove only Z flag is used.
//createReplacer(X86::TEST8rr, X86::KTESTBrr);
//createReplacer(X86::TEST16rr, X86::KTESTWrr);
createReplacer(X86::XOR8rr, X86::KXORBrr);
}
}
bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
if (skipFunction(MF.getFunction()))
return false;
if (DisableX86DomainReassignment)
return false;
LLVM_DEBUG(
dbgs() << "***** Machine Function before Domain Reassignment *****\n");
LLVM_DEBUG(MF.print(dbgs()));
STI = &MF.getSubtarget<X86Subtarget>();
// GPR->K is the only transformation currently supported, so bail out early
// if AVX512 is not available.
if (!STI->hasAVX512())
return false;
MRI = &MF.getRegInfo();
assert(MRI->isSSA() && "Expected MIR to be in SSA form");
TII = STI->getInstrInfo();
initConverters();
bool Changed = false;
EnclosedEdges.clear();
EnclosedInstrs.clear();
std::vector<Closure> Closures;
// Go over all virtual registers and calculate a closure.
unsigned ClosureID = 0;
for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(Idx);
// GPRs are the only source domain currently supported.
if (!isGPR(MRI->getRegClass(Reg)))
continue;
// Register already in closure.
if (EnclosedEdges.count(Reg))
continue;
// Calculate closure starting with Reg.
Closure C(ClosureID++, {MaskDomain});
buildClosure(C, Reg);
// Collect all closures that can potentially be converted.
if (!C.empty() && C.isLegal(MaskDomain))
Closures.push_back(std::move(C));
}
for (Closure &C : Closures) {
LLVM_DEBUG(C.dump(MRI));
if (isReassignmentProfitable(C, MaskDomain)) {
reassign(C, MaskDomain);
++NumClosuresConverted;
Changed = true;
}
}
DeleteContainerSeconds(Converters);
LLVM_DEBUG(
dbgs() << "***** Machine Function after Domain Reassignment *****\n");
LLVM_DEBUG(MF.print(dbgs()));
return Changed;
}
INITIALIZE_PASS(X86DomainReassignment, "x86-domain-reassignment",
"X86 Domain Reassignment Pass", false, false)
/// Returns an instance of the Domain Reassignment pass.
FunctionPass *llvm::createX86DomainReassignmentPass() {
return new X86DomainReassignment();
}
Index: projects/clang700-import/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp (revision 340125)
@@ -1,40830 +1,40829 @@
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86ShuffleDecodeConstantPool.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;
#define DEBUG_TYPE "x86-isel"
STATISTIC(NumTailCalls, "Number of tail calls");
static cl::opt<bool> ExperimentalVectorWideningLegalization(
"x86-experimental-vector-widening-legalization", cl::init(false),
cl::desc("Enable an experimental vector type legalization through widening "
"rather than promotion."),
cl::Hidden);
static cl::opt<int> ExperimentalPrefLoopAlignment(
"x86-experimental-pref-loop-alignment", cl::init(4),
cl::desc("Sets the preferable loop alignment for experiments "
"(the last x86-experimental-pref-loop-alignment bits"
" of the loop header PC will be 0)."),
cl::Hidden);
static cl::opt<bool> MulConstantOptimization(
"mul-constant-optimization", cl::init(true),
cl::desc("Replace 'mul x, Const' with more effective instructions like "
"SHIFT, LEA, etc."),
cl::Hidden);
/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
const char *Msg) {
MachineFunction &MF = DAG.getMachineFunction();
DAG.getContext()->diagnose(
DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
const X86Subtarget &STI)
: TargetLowering(TM), Subtarget(STI) {
bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
X86ScalarSSEf64 = Subtarget.hasSSE2();
X86ScalarSSEf32 = Subtarget.hasSSE1();
MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
// Set up the TargetLowering object.
// X86 is weird. It always uses i8 for shift amounts and setcc results.
setBooleanContents(ZeroOrOneBooleanContent);
// X86-SSE is even stranger. It uses -1 or 0 for vector masks.
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
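// I.e. a scalar setcc produces 0 or 1 in an i8 register, while a vector
// compare produces all-zeros or all-ones lanes that can be used directly
// as a blend mask.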
// For 64-bit, since we have so many registers, use the ILP scheduler.
// For 32-bit, use the register pressure specific scheduling.
// For Atom, always use ILP scheduling.
if (Subtarget.isAtom())
setSchedulingPreference(Sched::ILP);
else if (Subtarget.is64Bit())
setSchedulingPreference(Sched::ILP);
else
setSchedulingPreference(Sched::RegPressure);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
// Bypass expensive divides and use cheaper ones.
if (TM.getOptLevel() >= CodeGenOpt::Default) {
if (Subtarget.hasSlowDivide32())
addBypassSlowDiv(32, 8);
if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
addBypassSlowDiv(64, 32);
}
if (Subtarget.isTargetKnownWindowsMSVC() ||
Subtarget.isTargetWindowsItanium()) {
// Setup Windows compiler runtime calls.
setLibcallName(RTLIB::SDIV_I64, "_alldiv");
setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
setLibcallName(RTLIB::SREM_I64, "_allrem");
setLibcallName(RTLIB::UREM_I64, "_aullrem");
setLibcallName(RTLIB::MUL_I64, "_allmul");
setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
}
if (Subtarget.isTargetDarwin()) {
// Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
setUseUnderscoreSetJmp(false);
setUseUnderscoreLongJmp(false);
} else if (Subtarget.isTargetWindowsGNU()) {
// MS runtime is weird: it exports _setjmp, but longjmp!
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(false);
} else {
setUseUnderscoreSetJmp(true);
setUseUnderscoreLongJmp(true);
}
// Set up the register classes.
addRegisterClass(MVT::i8, &X86::GR8RegClass);
addRegisterClass(MVT::i16, &X86::GR16RegClass);
addRegisterClass(MVT::i32, &X86::GR32RegClass);
if (Subtarget.is64Bit())
addRegisterClass(MVT::i64, &X86::GR64RegClass);
for (MVT VT : MVT::integer_valuetypes())
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
// We don't accept any truncstore of integer registers.
setTruncStoreAction(MVT::i64, MVT::i32, Expand);
setTruncStoreAction(MVT::i64, MVT::i16, Expand);
setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
setTruncStoreAction(MVT::i32, MVT::i16, Expand);
setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
setTruncStoreAction(MVT::i16, MVT::i8, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// SETOEQ and SETUNE require checking two conditions.
setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
// Integer absolute.
if (Subtarget.hasCMov()) {
setOperationAction(ISD::ABS , MVT::i16 , Custom);
setOperationAction(ISD::ABS , MVT::i32 , Custom);
if (Subtarget.is64Bit())
setOperationAction(ISD::ABS , MVT::i64 , Custom);
}
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
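// E.g. a u8/u16 value is zero-extended to i32 and converted with the signed
// i32 conversion, which is exact because the extended value is never
// negative.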
if (Subtarget.is64Bit()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
// f32/f64 are legal, f80 is custom.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
else
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
} else if (!Subtarget.useSoftFloat()) {
// We have an algorithm for SSE2->double, and we turn this into a
// 64-bit FILD followed by conditional FADD for other targets.
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
// We have an algorithm for SSE2, and we turn this into a 64-bit
// FILD or VCVTUSI2SS/SD for other targets.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
} else {
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
}
// Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
// this operation.
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
if (!Subtarget.useSoftFloat()) {
// SSE has no i16 to fp conversion, only i32.
if (X86ScalarSSEf32) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
// f32 and f64 cases are Legal, f80 case is not
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
} else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
} else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Expand);
}
// Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
if (!Subtarget.useSoftFloat()) {
// In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
// are Legal, f80 is custom lowered.
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
if (X86ScalarSSEf32) {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
// f32 and f64 cases are Legal, f80 case is not
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
} else {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
}
} else {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Expand);
setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Expand);
}
// Handle FP_TO_UINT by promoting the destination to a larger signed
// conversion.
setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
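// E.g. f32/f64 -> u16 is performed as an FP -> i32 signed conversion
// followed by truncation, which is exact because the u16 range fits in a
// non-negative i32.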
if (Subtarget.is64Bit()) {
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
// FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
} else {
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
}
} else if (!Subtarget.useSoftFloat()) {
// Since AVX is a superset of SSE3, only check for SSE here.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
else
// With AVX512 we can use vcvts[ds]2usi for f32/f64->i32, f80 is custom.
// With SSE3 we can use fisttpll to convert to a signed i64; without
// SSE, we're stuck with a fistpll.
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
}
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
if (!X86ScalarSSEf64) {
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
// Without SSE, i64->f64 goes through memory.
setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
}
} else if (!Subtarget.is64Bit())
setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
// Scalar integer divide and remainder are lowered to use operations that
// produce two results, to match the available instructions. This exposes
// the two-result form to trivial CSE, which is able to combine x/y and x%y
// into a single instruction.
//
// Scalar integer multiply-high is also lowered to use two-result
// operations, to match the available instructions. However, plain multiply
// (low) operations are left as Legal, as there are single-result
// instructions for this in x86. Using the two-result multiply instructions
// when both high and low results are needed must be arranged by dagcombine.
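// E.g. C code computing both "x / y" and "x % y" selects a single division
// instruction, with the quotient and remainder read from its two results.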
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
}
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BRCOND , MVT::Other, Custom);
for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::BR_CC, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
}
if (Subtarget.is64Bit())
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f32 , Expand);
setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::FREM , MVT::f80 , Expand);
setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
// Promote the i8 variants and force them up to i32, which has a shorter
// encoding.
setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
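// N.b. generic promotion zero-extends the i8 input to i32 and, for plain
// CTTZ, also sets the bit at position 8 so that a zero input yields 8
// rather than 32 (CTTZ_ZERO_UNDEF can skip that fixup).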
if (!Subtarget.hasBMI()) {
setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
}
}
if (Subtarget.hasLZCNT()) {
// When promoting the i8 variants, force them to i32 for a shorter
// encoding.
setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
} else {
setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
if (Subtarget.is64Bit()) {
setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
}
}
// Special handling for half-precision floating point conversions.
// If we don't have F16C support, then lower half float conversions
// into library calls.
if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
}
// There's never any support for operations beyond MVT::f32.
setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
setTruncStoreAction(MVT::f32, MVT::f16, Expand);
setTruncStoreAction(MVT::f64, MVT::f16, Expand);
setTruncStoreAction(MVT::f80, MVT::f16, Expand);
if (Subtarget.hasPOPCNT()) {
setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
} else {
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
if (Subtarget.is64Bit())
setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
}
setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
if (!Subtarget.hasMOVBE())
setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
// These should be promoted to a larger select which is supported.
setOperationAction(ISD::SELECT , MVT::i1 , Promote);
// X86 wants to expand cmov itself.
for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
}
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
}
// Custom action for SELECT MMX and expand action for SELECT_CC MMX
setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
// NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
// LLVM/Clang supports zero-cost DWARF and SEH exception handling.
setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
// Darwin ABI issue.
for (auto VT : { MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::ConstantPool , VT, Custom);
setOperationAction(ISD::JumpTable , VT, Custom);
setOperationAction(ISD::GlobalAddress , VT, Custom);
setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
setOperationAction(ISD::ExternalSymbol , VT, Custom);
setOperationAction(ISD::BlockAddress , VT, Custom);
}
// 64-bit shl, sra, srl (iff 32-bit x86)
for (auto VT : { MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::SHL_PARTS, VT, Custom);
setOperationAction(ISD::SRA_PARTS, VT, Custom);
setOperationAction(ISD::SRL_PARTS, VT, Custom);
}
if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
// Expand certain atomics
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
}
if (Subtarget.hasCmpxchg16b()) {
setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
}
// FIXME - use subtarget debug flags
if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
!Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
}
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
bool Is64Bit = Subtarget.is64Bit();
setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
// GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
: &X86::FR32RegClass);
addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
: &X86::FR64RegClass);
for (auto VT : { MVT::f32, MVT::f64 }) {
// Use ANDPD to simulate FABS.
setOperationAction(ISD::FABS, VT, Custom);
// Use XORP to simulate FNEG.
setOperationAction(ISD::FNEG, VT, Custom);
// Use ANDPD and ORPD to simulate FCOPYSIGN.
setOperationAction(ISD::FCOPYSIGN, VT, Custom);
// We don't support sin/cos/fmod
setOperationAction(ISD::FSIN , VT, Expand);
setOperationAction(ISD::FCOS , VT, Expand);
setOperationAction(ISD::FSINCOS, VT, Expand);
}
// Lower this to MOVMSK plus an AND.
setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
// Expand FP immediates into loads from the stack, except for the special
// cases we handle.
addLegalFPImmediate(APFloat(+0.0)); // xorpd
addLegalFPImmediate(APFloat(+0.0f)); // xorps
} else if (UseX87 && X86ScalarSSEf32) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
addRegisterClass(MVT::f32, &X86::FR32RegClass);
addRegisterClass(MVT::f64, &X86::RFP64RegClass);
// Use ANDPS to simulate FABS.
setOperationAction(ISD::FABS , MVT::f32, Custom);
// Use XORP to simulate FNEG.
setOperationAction(ISD::FNEG , MVT::f32, Custom);
setOperationAction(ISD::UNDEF, MVT::f64, Expand);
// Use ANDPS and ORPS to simulate FCOPYSIGN.
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
// We don't support sin/cos/fmod
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
// Special cases we handle for FP constants.
addLegalFPImmediate(APFloat(+0.0f)); // xorps
addLegalFPImmediate(APFloat(+0.0)); // FLD0
addLegalFPImmediate(APFloat(+1.0)); // FLD1
addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
// Always expand sin/cos functions even though x87 has an instruction.
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
} else if (UseX87) {
// f32 and f64 in x87.
// Set up the FP register classes.
addRegisterClass(MVT::f64, &X86::RFP64RegClass);
addRegisterClass(MVT::f32, &X86::RFP32RegClass);
for (auto VT : { MVT::f32, MVT::f64 }) {
setOperationAction(ISD::UNDEF, VT, Expand);
setOperationAction(ISD::FCOPYSIGN, VT, Expand);
// Always expand sin/cos functions even though x87 has an instruction.
setOperationAction(ISD::FSIN , VT, Expand);
setOperationAction(ISD::FCOS , VT, Expand);
setOperationAction(ISD::FSINCOS, VT, Expand);
}
addLegalFPImmediate(APFloat(+0.0)); // FLD0
addLegalFPImmediate(APFloat(+1.0)); // FLD1
addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
addLegalFPImmediate(APFloat(+0.0f)); // FLD0
addLegalFPImmediate(APFloat(+1.0f)); // FLD1
addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
}
// We don't support FMA.
setOperationAction(ISD::FMA, MVT::f64, Expand);
setOperationAction(ISD::FMA, MVT::f32, Expand);
// Long double always uses X87, except f128 in MMX.
if (UseX87) {
if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
addRegisterClass(MVT::f128, &X86::VR128RegClass);
ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
setOperationAction(ISD::FABS , MVT::f128, Custom);
setOperationAction(ISD::FNEG , MVT::f128, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
}
addRegisterClass(MVT::f80, &X86::RFP80RegClass);
setOperationAction(ISD::UNDEF, MVT::f80, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
{
APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
addLegalFPImmediate(TmpFlt); // FLD0
TmpFlt.changeSign();
addLegalFPImmediate(TmpFlt); // FLD0/FCHS
bool ignored;
APFloat TmpFlt2(+1.0);
TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
&ignored);
addLegalFPImmediate(TmpFlt2); // FLD1
TmpFlt2.changeSign();
addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
}
// Always expand sin/cos functions even though x87 has an instruction.
setOperationAction(ISD::FSIN , MVT::f80, Expand);
setOperationAction(ISD::FCOS , MVT::f80, Expand);
setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
setOperationAction(ISD::FCEIL, MVT::f80, Expand);
setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
setOperationAction(ISD::FRINT, MVT::f80, Expand);
setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
setOperationAction(ISD::FMA, MVT::f80, Expand);
}
// Always use a library call for pow.
setOperationAction(ISD::FPOW , MVT::f32 , Expand);
setOperationAction(ISD::FPOW , MVT::f64 , Expand);
setOperationAction(ISD::FPOW , MVT::f80 , Expand);
setOperationAction(ISD::FLOG, MVT::f80, Expand);
setOperationAction(ISD::FLOG2, MVT::f80, Expand);
setOperationAction(ISD::FLOG10, MVT::f80, Expand);
setOperationAction(ISD::FEXP, MVT::f80, Expand);
setOperationAction(ISD::FEXP2, MVT::f80, Expand);
setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
// Some FP actions are always expanded for vector types.
for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FSINCOS, VT, Expand);
setOperationAction(ISD::FCOS, VT, Expand);
setOperationAction(ISD::FREM, VT, Expand);
setOperationAction(ISD::FCOPYSIGN, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::FLOG, VT, Expand);
setOperationAction(ISD::FLOG2, VT, Expand);
setOperationAction(ISD::FLOG10, VT, Expand);
setOperationAction(ISD::FEXP, VT, Expand);
setOperationAction(ISD::FEXP2, VT, Expand);
}
// First set operation action for all vector types to either promote
// (for widening) or expand (for scalarization). Then we will selectively
// turn on ones that can be effectively codegen'd.
for (MVT VT : MVT::vector_valuetypes()) {
setOperationAction(ISD::SDIV, VT, Expand);
setOperationAction(ISD::UDIV, VT, Expand);
setOperationAction(ISD::SREM, VT, Expand);
setOperationAction(ISD::UREM, VT, Expand);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
setOperationAction(ISD::FMA, VT, Expand);
setOperationAction(ISD::FFLOOR, VT, Expand);
setOperationAction(ISD::FCEIL, VT, Expand);
setOperationAction(ISD::FTRUNC, VT, Expand);
setOperationAction(ISD::FRINT, VT, Expand);
setOperationAction(ISD::FNEARBYINT, VT, Expand);
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::MULHS, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
setOperationAction(ISD::MULHU, VT, Expand);
setOperationAction(ISD::SDIVREM, VT, Expand);
setOperationAction(ISD::UDIVREM, VT, Expand);
setOperationAction(ISD::CTPOP, VT, Expand);
setOperationAction(ISD::CTTZ, VT, Expand);
setOperationAction(ISD::CTLZ, VT, Expand);
setOperationAction(ISD::ROTL, VT, Expand);
setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
setOperationAction(ISD::SETCC, VT, Expand);
setOperationAction(ISD::FP_TO_UINT, VT, Expand);
setOperationAction(ISD::FP_TO_SINT, VT, Expand);
setOperationAction(ISD::UINT_TO_FP, VT, Expand);
setOperationAction(ISD::SINT_TO_FP, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
setOperationAction(ISD::TRUNCATE, VT, Expand);
setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
setOperationAction(ISD::ANY_EXTEND, VT, Expand);
setOperationAction(ISD::SELECT_CC, VT, Expand);
for (MVT InnerVT : MVT::vector_valuetypes()) {
setTruncStoreAction(InnerVT, VT, Expand);
setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
// N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
// types, we have to deal with them whether we ask for Expansion or not.
// Setting Expand causes its own optimisation problems though, so leave
// them legal.
if (VT.getVectorElementType() == MVT::i1)
setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
// EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
// split/scalarized right now.
if (VT.getVectorElementType() == MVT::f16)
setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
}
}
// FIXME: In order to prevent SSE instructions being expanded to MMX ones
// with -msoft-float, disable use of MMX as well.
if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
// No operations on x86mmx supported, everything uses intrinsics.
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
setOperationAction(ISD::FABS, MVT::v4f32, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
// FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
// registers cannot be used even for integer operations.
addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
: &X86::VR128RegClass);
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
setOperationAction(ISD::FABS, MVT::v2f64, Custom);
setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
}
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
// Provide custom widening for v2f32 setcc. This is really for VLX, where
// the setcc result type is v2i1/v4i1 for v2f32/v4f32, leading type
// legalization to change the result type to v4i1 during widening. It works
// fine for SSE2 and is probably faster, so there is no need to qualify it
// with VLX support.
setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTTZ, VT, Custom);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
setCondCodeAction(ISD::SETLT, VT, Custom);
setCondCodeAction(ISD::SETLE, VT, Custom);
}
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
// We support custom legalizing of sext and anyext loads for specific
// memory vector types which we can load as a scalar (or sequence of
// scalars) and extend in-register to a legal 128-bit vector type. For sext
// loads these must work with a single scalar load.
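// E.g. a sextload from v4i8 producing v4i32 can be implemented as a single
// i32 scalar load followed by an in-register sign extension, instead of
// four separate byte loads.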
for (MVT VT : MVT::integer_vector_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
}
for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
if (VT == MVT::v2i64 && !Subtarget.is64Bit())
continue;
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
setOperationPromotedToType(ISD::AND, VT, MVT::v2i64);
setOperationPromotedToType(ISD::OR, VT, MVT::v2i64);
setOperationPromotedToType(ISD::XOR, VT, MVT::v2i64);
setOperationPromotedToType(ISD::LOAD, VT, MVT::v2i64);
setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
}
// Custom lower v2i64 and v2f64 selects.
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
// Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);
setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
if (!Subtarget.hasAVX512())
setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
// In the customized shift lowering, the legal v4i32/v2i64 cases
// in AVX2 will be recognized.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
}
setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
setOperationAction(ISD::ABS, MVT::v16i8, Legal);
setOperationAction(ISD::ABS, MVT::v8i16, Legal);
setOperationAction(ISD::ABS, MVT::v4i32, Legal);
setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
setOperationAction(ISD::FCEIL, RoundedTy, Legal);
setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
setOperationAction(ISD::FRINT, RoundedTy, Legal);
setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
}
setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
// We directly match byte blends in the backend as they match the VSELECT
// condition form.
setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
// SSE41 brings specific instructions for doing vector sign extend even in
// cases where we don't have SRA.
for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
}
for (MVT VT : MVT::integer_vector_valuetypes()) {
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
}
// SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v2i32, MVT::v2i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
}
// i8 vectors are custom because the source register and source memory
// operand types are not the same width.
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
setOperationAction(ISD::ROTL, VT, Custom);
// XOP can efficiently perform BITREVERSE with VPPERM.
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
setOperationAction(ISD::BITREVERSE, VT, Custom);
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
setOperationAction(ISD::BITREVERSE, VT, Custom);
}
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
bool HasInt256 = Subtarget.hasInt256();
addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
: &X86::VR256RegClass);
for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
setOperationAction(ISD::FFLOOR, VT, Legal);
setOperationAction(ISD::FCEIL, VT, Legal);
setOperationAction(ISD::FTRUNC, VT, Legal);
setOperationAction(ISD::FRINT, VT, Legal);
setOperationAction(ISD::FNEARBYINT, VT, Legal);
setOperationAction(ISD::FNEG, VT, Custom);
setOperationAction(ISD::FABS, VT, Custom);
setOperationAction(ISD::FCOPYSIGN, VT, Custom);
}
// (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
// even though v8i16 is a legal type.
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);
if (!Subtarget.hasAVX512())
setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);
// In the customized shift lowering, the legal v8i32/v4i64 cases
// in AVX2 will be recognized.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
}
setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
setOperationAction(ISD::ANY_EXTEND, VT, Custom);
}
setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTTZ, VT, Custom);
setOperationAction(ISD::CTLZ, VT, Custom);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
setCondCodeAction(ISD::SETLT, VT, Custom);
setCondCodeAction(ISD::SETLE, VT, Custom);
}
if (Subtarget.hasAnyFMA()) {
for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
MVT::v2f64, MVT::v4f64 })
setOperationAction(ISD::FMA, VT, Legal);
}
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
}
setOperationAction(ISD::MUL, MVT::v4i64, Custom);
setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MUL, MVT::v32i8, Custom);
setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
}
if (HasInt256) {
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i64, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i32, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i16, Custom);
// The custom lowering for UINT_TO_FP for v8i32 becomes interesting
// when we have a 256bit-wide blend with immediate.
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
// AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
}
}
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
setOperationAction(ISD::MLOAD, VT, Legal);
setOperationAction(ISD::MSTORE, VT, Legal);
}
// Extract subvector is special because the value type
// (result) is 128-bit but the source is 256-bit wide.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
MVT::v4f32, MVT::v2f64 }) {
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
}
// Custom lower several nodes for 256-bit types.
for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
MVT::v8f32, MVT::v4f64 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
}
if (HasInt256)
setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
// Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
setOperationPromotedToType(ISD::AND, VT, MVT::v4i64);
setOperationPromotedToType(ISD::OR, VT, MVT::v4i64);
setOperationPromotedToType(ISD::XOR, VT, MVT::v4i64);
setOperationPromotedToType(ISD::LOAD, VT, MVT::v4i64);
setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
}
if (HasInt256) {
// Custom legalize 2x32 to get a little better code.
setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
setOperationAction(ISD::MGATHER, VT, Custom);
}
}
// This block controls legalization of the mask vector sizes that are
// available with AVX512. 512-bit vectors are in a separate block controlled
// by useAVX512Regs.
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
// There is no byte sized k-register load or store without AVX512DQ.
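// (KMOVW exists with plain AVX512F, but the byte-sized KMOVB requires
// AVX512DQ, so the narrow mask types below are custom-lowered, e.g. by
// going through a GPR.)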
if (!Subtarget.hasDQI()) {
setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
setOperationAction(ISD::STORE, MVT::v1i1, Custom);
setOperationAction(ISD::STORE, MVT::v2i1, Custom);
setOperationAction(ISD::STORE, MVT::v4i1, Custom);
setOperationAction(ISD::STORE, MVT::v8i1, Custom);
}
// Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
setOperationAction(ISD::ANY_EXTEND, VT, Custom);
}
for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
setOperationAction(ISD::ADD, VT, Custom);
setOperationAction(ISD::SUB, VT, Custom);
setOperationAction(ISD::MUL, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::TRUNCATE, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Expand);
}
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v2i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom);
for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
}
// This block controls legalization for 512-bit operations with 32/64 bit
// elements. 512-bits can be disabled based on prefer-vector-width and
// required-vector-width function attributes.
if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
for (MVT VT : MVT::fp_vector_valuetypes())
setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
}
for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
setOperationAction(ISD::FNEG, VT, Custom);
setOperationAction(ISD::FABS, VT, Custom);
setOperationAction(ISD::FMA, VT, Legal);
setOperationAction(ISD::FCOPYSIGN, VT, Custom);
}
setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32);
setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32);
setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32);
setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
if (!Subtarget.hasVLX()) {
// With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
// to 512-bit rather than use the AVX2 instructions so that we can use
// k-masks.
for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
setOperationAction(ISD::MLOAD, VT, Custom);
setOperationAction(ISD::MSTORE, VT, Custom);
}
}
setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
setOperationAction(ISD::FFLOOR, VT, Legal);
setOperationAction(ISD::FCEIL, VT, Legal);
setOperationAction(ISD::FTRUNC, VT, Legal);
setOperationAction(ISD::FRINT, VT, Legal);
setOperationAction(ISD::FNEARBYINT, VT, Legal);
}
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i64, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v16i32, Custom);
// Without BWI we need to use custom lowering to handle MVT::v64i8 input.
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v64i8, Custom);
setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v64i8, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
setOperationAction(ISD::MUL, MVT::v8i64, Custom);
setOperationAction(ISD::MUL, MVT::v16i32, Legal);
setOperationAction(ISD::UMUL_LOHI, MVT::v16i32, Custom);
setOperationAction(ISD::SMUL_LOHI, MVT::v16i32, Custom);
setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
setOperationAction(ISD::ABS, VT, Legal);
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTTZ, VT, Custom);
setOperationAction(ISD::ROTL, VT, Custom);
setOperationAction(ISD::ROTR, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
// The condition codes aren't legal in SSE/AVX and under AVX512 we use
// setcc all the way to isel and prefer SETGT in some isel patterns.
setCondCodeAction(ISD::SETLT, VT, Custom);
setCondCodeAction(ISD::SETLE, VT, Custom);
}
// Need to promote to 64-bit even though we have 32-bit masked instructions
// because the IR optimizers rearrange bitcasts around logic ops leaving
// too many variations to handle if we don't promote them.
setOperationPromotedToType(ISD::AND, MVT::v16i32, MVT::v8i64);
setOperationPromotedToType(ISD::OR, MVT::v16i32, MVT::v8i64);
setOperationPromotedToType(ISD::XOR, MVT::v16i32, MVT::v8i64);
if (Subtarget.hasDQI()) {
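// AVX512DQ provides these directly: VPMULLQ for the 64-bit multiply and
// the VCVT[U]QQ2P[SD] / VCVTP[SD]2[U]QQ families for the conversions.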
setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
setOperationAction(ISD::MUL, MVT::v8i64, Legal);
}
if (Subtarget.hasCDI()) {
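// AVX512CD provides the VPLZCNTD/VPLZCNTQ leading-zero-count instructions.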
// NonVLX sub-targets extend 128/256 vectors to use the 512 version.
for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
setOperationAction(ISD::CTLZ, VT, Legal);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
}
} // Subtarget.hasCDI()
if (Subtarget.hasVPOPCNTDQ()) {
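// AVX512VPOPCNTDQ provides native VPOPCNTD/VPOPCNTQ population counts.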
for (auto VT : { MVT::v16i32, MVT::v8i64 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
// Extract subvector is special because the value type
// (result) is 256-bit but the source is 512-bit wide.
// 128-bit was made Legal under AVX1.
for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
MVT::v8f32, MVT::v4f64 })
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
setOperationAction(ISD::MLOAD, VT, Legal);
setOperationAction(ISD::MSTORE, VT, Legal);
setOperationAction(ISD::MGATHER, VT, Custom);
setOperationAction(ISD::MSCATTER, VT, Custom);
}
for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
setOperationPromotedToType(ISD::LOAD, VT, MVT::v8i64);
setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
}
// Need to custom split v32i16/v64i8 bitcasts.
if (!Subtarget.hasBWI()) {
setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
setOperationAction(ISD::BITCAST, MVT::v64i8, Custom);
}
} // has AVX-512
// This block controls legalization for operations that don't have
// pre-AVX512 equivalents. Without VLX we use 512-bit operations for
// narrower widths.
if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
// These operations are handled on non-VLX by artificially widening in
// isel patterns.
// TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
setOperationAction(ISD::ABS, VT, Legal);
}
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::ROTL, VT, Custom);
setOperationAction(ISD::ROTR, VT, Custom);
}
// Custom legalize 2x32 to get a little better code.
setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
setOperationAction(ISD::MSCATTER, VT, Custom);
if (Subtarget.hasDQI()) {
for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::SINT_TO_FP, VT, Legal);
setOperationAction(ISD::UINT_TO_FP, VT, Legal);
setOperationAction(ISD::FP_TO_SINT, VT, Legal);
setOperationAction(ISD::FP_TO_UINT, VT, Legal);
setOperationAction(ISD::MUL, VT, Legal);
}
}
if (Subtarget.hasCDI()) {
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
setOperationAction(ISD::CTLZ, VT, Legal);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Custom);
}
} // Subtarget.hasCDI()
if (Subtarget.hasVPOPCNTDQ()) {
for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
}
// This block controls legalization of v32i1/v64i1, which are available with
// AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
// useBWIRegs.
if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
setOperationAction(ISD::ADD, VT, Custom);
setOperationAction(ISD::SUB, VT, Custom);
setOperationAction(ISD::MUL, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Expand);
setOperationAction(ISD::TRUNCATE, VT, Custom);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
setOperationAction(ISD::SELECT, VT, Custom);
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
}
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
for (auto VT : { MVT::v16i1, MVT::v32i1 })
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
// Extends from v32i1 masks to 256-bit vectors.
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
}
// This block controls legalization for v32i16 and v64i8. 512-bits can be
// disabled based on prefer-vector-width and required-vector-width function
// attributes.
if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
// Extends from v64i1 masks to 512-bit vectors.
setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::MUL, MVT::v32i16, Legal);
setOperationAction(ISD::MUL, MVT::v64i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
setOperationAction(ISD::ABS, VT, Legal);
setOperationAction(ISD::SRL, VT, Custom);
setOperationAction(ISD::SHL, VT, Custom);
setOperationAction(ISD::SRA, VT, Custom);
setOperationAction(ISD::MLOAD, VT, Legal);
setOperationAction(ISD::MSTORE, VT, Legal);
setOperationAction(ISD::CTPOP, VT, Custom);
setOperationAction(ISD::CTTZ, VT, Custom);
setOperationAction(ISD::CTLZ, VT, Custom);
setOperationAction(ISD::SMAX, VT, Legal);
setOperationAction(ISD::UMAX, VT, Legal);
setOperationAction(ISD::SMIN, VT, Legal);
setOperationAction(ISD::UMIN, VT, Legal);
setOperationAction(ISD::SETCC, VT, Custom);
setOperationPromotedToType(ISD::AND, VT, MVT::v8i64);
setOperationPromotedToType(ISD::OR, VT, MVT::v8i64);
setOperationPromotedToType(ISD::XOR, VT, MVT::v8i64);
}
for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
}
if (Subtarget.hasBITALG()) {
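// AVX512BITALG provides native VPOPCNTB/VPOPCNTW for byte/word elements.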
for (auto VT : { MVT::v64i8, MVT::v32i16 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
}
if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
}
// These operations are handled on non-VLX by artificially widening in
// isel patterns.
// TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
if (Subtarget.hasBITALG()) {
for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
setOperationAction(ISD::CTPOP, VT, Legal);
}
}
if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
if (Subtarget.hasDQI()) {
// Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
// v2f32 UINT_TO_FP is already custom under SSE2.
setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
"Unexpected operation action!");
// v2i64 FP_TO_S/UINT(v2f32) custom conversion.
setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
}
if (Subtarget.hasBWI()) {
setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
}
}
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
if (!Subtarget.is64Bit()) {
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
}
// Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
// handle type legalization for these operations here.
//
// FIXME: We really should do custom legalization for addition and
// subtraction on x86-32 once PR3203 is fixed. We really can't do much better
// than generic legalization for 64-bit multiplication-with-overflow, though.
for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
if (VT == MVT::i64 && !Subtarget.is64Bit())
continue;
// Add/Sub/Mul with overflow operations are custom lowered.
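// Broadly, these lower to the corresponding arithmetic X86ISD node that also
// produces EFLAGS, with the overflow bit read back via a SETCC on OF
// (signed) or CF (unsigned).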
setOperationAction(ISD::SADDO, VT, Custom);
setOperationAction(ISD::UADDO, VT, Custom);
setOperationAction(ISD::SSUBO, VT, Custom);
setOperationAction(ISD::USUBO, VT, Custom);
setOperationAction(ISD::SMULO, VT, Custom);
setOperationAction(ISD::UMULO, VT, Custom);
// Support carry in as value rather than glue.
setOperationAction(ISD::ADDCARRY, VT, Custom);
setOperationAction(ISD::SUBCARRY, VT, Custom);
setOperationAction(ISD::SETCCCARRY, VT, Custom);
}
if (!Subtarget.is64Bit()) {
// These libcalls are not available on 32-bit targets.
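// (RTLIB::SHL_I128 and friends normally map to __ashlti3/__lshrti3/
// __ashrti3/__multi3; clearing the names forces the legalizer to expand
// these 128-bit operations inline instead of emitting calls.)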
setLibcallName(RTLIB::SHL_I128, nullptr);
setLibcallName(RTLIB::SRL_I128, nullptr);
setLibcallName(RTLIB::SRA_I128, nullptr);
setLibcallName(RTLIB::MUL_I128, nullptr);
}
// Combine sin / cos into _sincos_stret if it is available.
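// The libcall names are only set on platforms whose runtime provides them,
// e.g. __sincosf_stret/__sincos_stret on Darwin; elsewhere this block is
// skipped.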
if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
}
if (Subtarget.isTargetWin64()) {
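// There is no native 128-bit division, so the custom lowering turns these
// into calls to the __divti3/__udivti3/__modti3/__umodti3 family, with the
// i128 operands passed indirectly since the Win64 ABI has no i128 argument
// class.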
setOperationAction(ISD::SDIV, MVT::i128, Custom);
setOperationAction(ISD::UDIV, MVT::i128, Custom);
setOperationAction(ISD::SREM, MVT::i128, Custom);
setOperationAction(ISD::UREM, MVT::i128, Custom);
setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
}
// On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
// is. We should promote the value to 64-bits to solve this.
// This is what the CRT headers do - `fmodf` is an inline header
// function casting to f64 and calling `fmod`.
if (Subtarget.is32Bit() && (Subtarget.isTargetKnownWindowsMSVC() ||
Subtarget.isTargetWindowsItanium()))
for (ISD::NodeType Op :
{ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
ISD::FLOG10, ISD::FPOW, ISD::FSIN})
if (isOperationExpand(Op, MVT::f32))
setOperationAction(Op, MVT::f32, Promote);
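// For example, an f32 FREM that reaches this point is effectively emitted as
//   (float)fmod((double)x, (double)y)
// matching what the CRT's inline fmodf does.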
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
setTargetDAGCombine(ISD::BITCAST);
setTargetDAGCombine(ISD::VSELECT);
setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::SHL);
setTargetDAGCombine(ISD::SRA);
setTargetDAGCombine(ISD::SRL);
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::ADD);
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::FSUB);
setTargetDAGCombine(ISD::FNEG);
setTargetDAGCombine(ISD::FMA);
setTargetDAGCombine(ISD::FMINNUM);
setTargetDAGCombine(ISD::FMAXNUM);
setTargetDAGCombine(ISD::SUB);
setTargetDAGCombine(ISD::LOAD);
setTargetDAGCombine(ISD::MLOAD);
setTargetDAGCombine(ISD::STORE);
setTargetDAGCombine(ISD::MSTORE);
setTargetDAGCombine(ISD::TRUNCATE);
setTargetDAGCombine(ISD::ZERO_EXTEND);
setTargetDAGCombine(ISD::ANY_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND);
setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
setTargetDAGCombine(ISD::SINT_TO_FP);
setTargetDAGCombine(ISD::UINT_TO_FP);
setTargetDAGCombine(ISD::SETCC);
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::XOR);
setTargetDAGCombine(ISD::MSCATTER);
setTargetDAGCombine(ISD::MGATHER);
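// Each setTargetDAGCombine call above registers an opcode so that the
// generic DAG combiner invokes this target's PerformDAGCombine hook whenever
// it visits a node of that type.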
computeRegisterProperties(Subtarget.getRegisterInfo());
MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
MaxStoresPerMemsetOptSize = 8;
MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
MaxStoresPerMemcpyOptSize = 4;
MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
MaxStoresPerMemmoveOptSize = 4;
// TODO: These control memcmp expansion in CGP and could be raised higher, but
// that needs to be benchmarked and balanced with the potential use of vector
// load/store types (PR33329, PR33914).
MaxLoadsPerMemcmp = 2;
MaxLoadsPerMemcmpOptSize = 2;
// Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
setPrefLoopAlignment(ExperimentalPrefLoopAlignment);
// An out-of-order CPU can speculatively execute past a predictable branch,
// but a conditional move could be stalled by an expensive earlier operation.
PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
EnableExtLdPromotion = true;
setPrefFunctionAlignment(4); // 2^4 bytes.
verifyIntrinsicTables();
}
// This has so far only been implemented for 64-bit MachO.
bool X86TargetLowering::useLoadStackGuardNode() const {
return Subtarget.isTargetMachO() && Subtarget.is64Bit();
}
bool X86TargetLowering::useStackGuardXorFP() const {
// Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
return Subtarget.getTargetTriple().isOSMSVCRT();
}
SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
const SDLoc &DL) const {
EVT PtrTy = getPointerTy(DAG.getDataLayout());
unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
return SDValue(Node, 0);
}
TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(EVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return TypeSplitVector;
if (ExperimentalVectorWideningLegalization &&
VT.getVectorNumElements() != 1 &&
VT.getVectorElementType().getSimpleVT() != MVT::i1)
return TypeWidenVector;
return TargetLoweringBase::getPreferredVectorAction(VT);
}
MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return MVT::v32i8;
return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}
unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
CallingConv::ID CC,
EVT VT) const {
if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
return 1;
return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}
EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
LLVMContext& Context,
EVT VT) const {
if (!VT.isVector())
return MVT::i8;
if (Subtarget.hasAVX512()) {
const unsigned NumElts = VT.getVectorNumElements();
// Figure out what this type will be legalized to.
EVT LegalVT = VT;
while (getTypeAction(Context, LegalVT) != TypeLegal)
LegalVT = getTypeToTransformTo(Context, LegalVT);
// If we got a 512-bit vector then we'll definitely have a vXi1 compare.
if (LegalVT.getSimpleVT().is512BitVector())
return EVT::getVectorVT(Context, MVT::i1, NumElts);
if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
// If we legalized to less than a 512-bit vector, then we will use a vXi1
// compare for vXi32/vXi64 for sure. If we have BWI we will also support
// vXi16/vXi8.
MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
return EVT::getVectorVT(Context, MVT::i1, NumElts);
}
}
return VT.changeVectorElementTypeToInteger();
}
/// Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
if (MaxAlign == 16)
return;
if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
if (VTy->getBitWidth() == 128)
MaxAlign = 16;
} else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
unsigned EltAlign = 0;
getMaxByValAlign(ATy->getElementType(), EltAlign);
if (EltAlign > MaxAlign)
MaxAlign = EltAlign;
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
for (auto *EltTy : STy->elements()) {
unsigned EltAlign = 0;
getMaxByValAlign(EltTy, EltAlign);
if (EltAlign > MaxAlign)
MaxAlign = EltAlign;
if (MaxAlign == 16)
break;
}
}
}
/// Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
const DataLayout &DL) const {
if (Subtarget.is64Bit()) {
// Max of 8 and alignment of type.
unsigned TyAlign = DL.getABITypeAlignment(Ty);
if (TyAlign > 8)
return TyAlign;
return 8;
}
unsigned Align = 4;
if (Subtarget.hasSSE1())
getMaxByValAlign(Ty, Align);
return Align;
}
/// Returns the target-specific optimal type for load and store operations as
/// a result of memset, memcpy, and memmove lowering. If DstAlign is zero, the
/// destination alignment can satisfy any constraint. Similarly, if SrcAlign
/// is zero there is no need to check it against an alignment requirement,
/// probably because the source does not need to be loaded. If 'IsMemset' is
/// true, this is expanding a memset. If 'ZeroMemset' is true, it is a memset
/// of zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so
/// it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
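/// For example, per the logic below, a large memcpy on an AVX machine with
/// fast (or suitably aligned) unaligned 32-byte access is typically emitted
/// as v32i8 loads and stores.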
EVT
X86TargetLowering::getOptimalMemOpType(uint64_t Size,
unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset,
bool MemcpyStrSrc,
MachineFunction &MF) const {
const Function &F = MF.getFunction();
if (!F.hasFnAttribute(Attribute::NoImplicitFloat)) {
if (Size >= 16 &&
(!Subtarget.isUnalignedMem16Slow() ||
((DstAlign == 0 || DstAlign >= 16) &&
(SrcAlign == 0 || SrcAlign >= 16)))) {
// FIXME: Check if unaligned 32-byte accesses are slow.
if (Size >= 32 && Subtarget.hasAVX()) {
// Although this isn't a well-supported type for AVX1, we'll let
// legalization and shuffle lowering produce the optimal codegen. If we
// choose an optimal type with a vector element larger than a byte,
// getMemsetStores() may create an intermediate splat (using an integer
// multiply) before we splat as a vector.
return MVT::v32i8;
}
if (Subtarget.hasSSE2())
return MVT::v16i8;
// TODO: Can SSE1 handle a byte vector?
if (Subtarget.hasSSE1())
return MVT::v4f32;
} else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
!Subtarget.is64Bit() && Subtarget.hasSSE2()) {
// Do not use f64 to lower memcpy if source is string constant. It's
// better to use i32 to avoid the loads.
// Also, do not use f64 to lower memset unless this is a memset of zeros.
// The gymnastics of splatting a byte value into an XMM register and then
// only using 8-byte stores (because this is a CPU with slow unaligned
// 16-byte accesses) makes that a loser.
return MVT::f64;
}
}
// This is a compromise. If we reach here, unaligned accesses may be slow on
// this target. However, creating smaller, aligned accesses could be even
// slower and would certainly be a lot more code.
if (Subtarget.is64Bit() && Size >= 8)
return MVT::i64;
return MVT::i32;
}
bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
if (VT == MVT::f32)
return X86ScalarSSEf32;
else if (VT == MVT::f64)
return X86ScalarSSEf64;
return true;
}
bool
X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
unsigned,
unsigned,
bool *Fast) const {
if (Fast) {
switch (VT.getSizeInBits()) {
default:
// 8-byte and under are always assumed to be fast.
*Fast = true;
break;
case 128:
*Fast = !Subtarget.isUnalignedMem16Slow();
break;
case 256:
*Fast = !Subtarget.isUnalignedMem32Slow();
break;
// TODO: What about AVX-512 (512-bit) accesses?
}
}
// Misaligned accesses of any size are always allowed.
return true;
}
/// Return the entry encoding for a jump table in the
/// current function. The returned value is a member of the
/// MachineJumpTableInfo::JTEntryKind enum.
unsigned X86TargetLowering::getJumpTableEncoding() const {
// In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
// symbol.
if (isPositionIndependent() && Subtarget.isPICStyleGOT())
return MachineJumpTableInfo::EK_Custom32;
// Otherwise, use the normal jump table encoding heuristics.
return TargetLowering::getJumpTableEncoding();
}
bool X86TargetLowering::useSoftFloat() const {
return Subtarget.useSoftFloat();
}
void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
ArgListTy &Args) const {
// Only relabel X86-32 for C / Stdcall CCs.
if (Subtarget.is64Bit())
return;
if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
return;
unsigned ParamRegs = 0;
if (auto *M = MF->getFunction().getParent())
ParamRegs = M->getNumberRegisterParameters();
// Mark the first N integer arguments as being passed in registers.
for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
Type *T = Args[Idx].Ty;
if (T->isIntOrPtrTy())
if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
unsigned numRegs = 1;
if (MF->getDataLayout().getTypeAllocSize(T) > 4)
numRegs = 2;
if (ParamRegs < numRegs)
return;
ParamRegs -= numRegs;
Args[Idx].IsInReg = true;
}
}
}
const MCExpr *
X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid, MCContext &Ctx) const {
assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
// In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
// entries.
return MCSymbolRefExpr::create(MBB->getSymbol(),
MCSymbolRefExpr::VK_GOTOFF, Ctx);
}
/// Returns relocation base for the given PIC jumptable.
SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const {
if (!Subtarget.is64Bit())
// This doesn't have SDLoc associated with it, but is not really the
// same as a Register.
return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
getPointerTy(DAG.getDataLayout()));
return Table;
}
/// This returns the relocation base for the given PIC jumptable,
/// the same as getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *X86TargetLowering::
getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
MCContext &Ctx) const {
// X86-64 uses RIP relative addressing based on the jump table label.
if (Subtarget.isPICStyleRIPRel())
return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
// Otherwise, the reference is relative to the PIC base.
return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
}
std::pair<const TargetRegisterClass *, uint8_t>
X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
MVT VT) const {
const TargetRegisterClass *RRC = nullptr;
uint8_t Cost = 1;
switch (VT.SimpleTy) {
default:
return TargetLowering::findRepresentativeClass(TRI, VT);
case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
break;
case MVT::x86mmx:
RRC = &X86::VR64RegClass;
break;
case MVT::f32: case MVT::f64:
case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
case MVT::v4f32: case MVT::v2f64:
case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
case MVT::v8f32: case MVT::v4f64:
case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
case MVT::v16f32: case MVT::v8f64:
RRC = &X86::VR128XRegClass;
break;
}
return std::make_pair(RRC, Cost);
}
unsigned X86TargetLowering::getAddressSpace() const {
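// In the X86 backend, address space 256 maps to the %gs segment and 257 to
// %fs, so 64-bit kernel code and all 32-bit code use %gs, while 64-bit user
// code uses %fs.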
if (Subtarget.is64Bit())
return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
return 256;
}
static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
(TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
}
static Constant* SegmentOffset(IRBuilder<> &IRB,
unsigned Offset, unsigned AddressSpace) {
return ConstantExpr::getIntToPtr(
ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
}
Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
// glibc, bionic, and Fuchsia have a special slot for the stack guard in
// tcbhead_t; use it instead of the usual global variable (see
// sysdeps/{i386,x86_64}/nptl/tls.h)
if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
if (Subtarget.isTargetFuchsia()) {
// <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
return SegmentOffset(IRB, 0x10, getAddressSpace());
} else {
// %fs:0x28, unless we're using a Kernel code model, in which case
// it's %gs:0x28. %gs:0x14 on i386.
unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
return SegmentOffset(IRB, Offset, getAddressSpace());
}
}
return TargetLowering::getIRStackGuard(IRB);
}
void X86TargetLowering::insertSSPDeclarations(Module &M) const {
// MSVC CRT provides functionalities for stack protection.
if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
// MSVC CRT has a global variable holding security cookie.
M.getOrInsertGlobal("__security_cookie",
Type::getInt8PtrTy(M.getContext()));
// MSVC CRT has a function to validate security cookie.
auto *SecurityCheckCookie = cast<Function>(
M.getOrInsertFunction("__security_check_cookie",
Type::getVoidTy(M.getContext()),
Type::getInt8PtrTy(M.getContext())));
SecurityCheckCookie->setCallingConv(CallingConv::X86_FastCall);
SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg);
return;
}
// glibc, bionic, and Fuchsia have a special slot for the stack guard.
if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
return;
TargetLowering::insertSSPDeclarations(M);
}
Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
// MSVC CRT has a global variable holding security cookie.
if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
return M.getGlobalVariable("__security_cookie");
}
return TargetLowering::getSDagStackGuard(M);
}
Value *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
// MSVC CRT has a function to validate security cookie.
if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
return M.getFunction("__security_check_cookie");
}
return TargetLowering::getSSPStackGuardCheck(M);
}
Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
if (Subtarget.getTargetTriple().isOSContiki())
return getDefaultSafeStackPointerLocation(IRB, false);
// Android provides a fixed TLS slot for the SafeStack pointer. See the
// definition of TLS_SLOT_SAFESTACK in
// https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
if (Subtarget.isTargetAndroid()) {
// %fs:0x48, unless we're using a Kernel code model, in which case it's
// %gs:0x48. %gs:0x24 on i386.
unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
return SegmentOffset(IRB, Offset, getAddressSpace());
}
// Fuchsia is similar.
if (Subtarget.isTargetFuchsia()) {
// <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
return SegmentOffset(IRB, 0x18, getAddressSpace());
}
return TargetLowering::getSafeStackPointerLocation(IRB);
}
bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const {
assert(SrcAS != DestAS && "Expected different address spaces!");
return SrcAS < 256 && DestAS < 256;
}
//===----------------------------------------------------------------------===//
// Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
#include "X86GenCallingConv.inc"
bool X86TargetLowering::CanLowerReturn(
CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
return CCInfo.CheckReturn(Outs, RetCC_X86);
}
const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
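// R11 is caller-saved and not used for argument passing in the common x86-64
// calling conventions, which makes it a safe scratch register here.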
static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
return ScratchRegs;
}
/// Lowers mask values (v*i1) to the location register type.
/// \returns the DAG node after lowering to the register type.
static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
const SDLoc &Dl, SelectionDAG &DAG) {
EVT ValVT = ValArg.getValueType();
if (ValVT == MVT::v1i1)
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
DAG.getIntPtrConstant(0, Dl));
if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
(ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
// Two-stage lowering might be required:
// bitcast: v8i1 -> i8 / v16i1 -> i16
// anyextend: i8 -> i32 / i16 -> i32
EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
if (ValLoc == MVT::i32)
ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
return ValToCopy;
}
if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
(ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
// One-stage lowering is required:
// bitcast: v32i1 -> i32 / v64i1 -> i64
return DAG.getBitcast(ValLoc, ValArg);
}
return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
}
/// Breaks a v64i1 value into two registers and adds the new nodes to the DAG.
static void Passv64i1ArgInRegs(
const SDLoc &Dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg,
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, CCValAssign &VA,
CCValAssign &NextVA, const X86Subtarget &Subtarget) {
assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
assert(Subtarget.is32Bit() && "Expecting 32 bit target");
assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
assert(VA.isRegLoc() && NextVA.isRegLoc() &&
"The value should reside in two registers");
// Before splitting the value we cast it to i64
Arg = DAG.getBitcast(MVT::i64, Arg);
// Splitting the value into two i32 types
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
DAG.getConstant(0, Dl, MVT::i32));
Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
DAG.getConstant(1, Dl, MVT::i32));
// Attach the two i32 types into corresponding registers
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
}
SDValue
X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
// In some cases we need to disable registers from the default CSR list.
// For example, when they are used for argument passing.
bool ShouldDisableCalleeSavedRegister =
CallConv == CallingConv::X86_RegCall ||
MF.getFunction().hasFnAttribute("no_caller_saved_registers");
if (CallConv == CallingConv::X86_INTR && !Outs.empty())
report_fatal_error("X86 interrupts may not return any value");
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Outs, RetCC_X86);
SDValue Flag;
SmallVector<SDValue, 6> RetOps;
RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
// Operand #1 = Bytes To Pop
RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
MVT::i32));
// Copy the result values into the output registers.
for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
++I, ++OutsIndex) {
CCValAssign &VA = RVLocs[I];
assert(VA.isRegLoc() && "Can only return in registers!");
// Add the register to the CalleeSaveDisableRegs list.
if (ShouldDisableCalleeSavedRegister)
MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
SDValue ValToCopy = OutVals[OutsIndex];
EVT ValVT = ValToCopy.getValueType();
// Promote values to the appropriate types.
if (VA.getLocInfo() == CCValAssign::SExt)
ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
else if (VA.getLocInfo() == CCValAssign::ZExt)
ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
else if (VA.getLocInfo() == CCValAssign::AExt) {
if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
else
ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
}
else if (VA.getLocInfo() == CCValAssign::BCvt)
ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
assert(VA.getLocInfo() != CCValAssign::FPExt &&
"Unexpected FP-extend for return value.");
// If this is x86-64, and we disabled SSE, we can't return FP values,
// or SSE or MMX vectors.
if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
(Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
} else if (ValVT == MVT::f64 &&
(Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
// Likewise we can't return F64 values with SSE1 only. gcc does so, but
// llvm-gcc has never done it right and no one has noticed, so this
// should be OK for now.
errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
}
// Returns in ST0/ST1 are handled specially: these are pushed as operands to
// the RET instruction and handled by the FP Stackifier.
if (VA.getLocReg() == X86::FP0 ||
VA.getLocReg() == X86::FP1) {
// If this is a copy from an xmm register to ST(0), use an FPExtend to
// change the value to the FP stack register class.
if (isScalarFPTypeInSSEReg(VA.getValVT()))
ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
RetOps.push_back(ValToCopy);
// Don't emit a copytoreg.
continue;
}
// 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
// which is returned in RAX / RDX.
if (Subtarget.is64Bit()) {
if (ValVT == MVT::x86mmx) {
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
ValToCopy);
// If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal.
if (!Subtarget.hasSSE2())
ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
}
}
}
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
if (VA.needsCustom()) {
assert(VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
Passv64i1ArgInRegs(dl, DAG, Chain, ValToCopy, RegsToPass, VA, RVLocs[++I],
Subtarget);
assert(2 == RegsToPass.size() &&
"Expecting two registers after Pass64BitArgInRegs");
// Add the second register to the CalleeSaveDisableRegs list.
if (ShouldDisableCalleeSavedRegister)
MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
} else {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
}
// Add nodes to the DAG and add the values into the RetOps list
for (auto &Reg : RegsToPass) {
Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
Flag = Chain.getValue(1);
RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
}
}
// The Swift calling convention does not require that we copy the sret
// argument into %rax/%eax for the return, and SRetReturnReg is not set for
// Swift.
// All x86 ABIs require that for returning structs by value we copy
// the sret argument into %rax/%eax (depending on ABI) for the return.
// We saved the argument into a virtual register in the entry block,
// so now we copy the value out and into %rax/%eax.
//
// Checking Function.hasStructRetAttr() here is insufficient because the IR
// may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
// false, then an sret argument may be implicitly inserted in the SelDAG. In
// either case FuncInfo->setSRetReturnReg() will have been called.
if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
// When we have both sret and another return value, we should use the
// original Chain stored in RetOps[0], instead of the current Chain updated
// in the above loop. If we only have sret, RetOps[0] equals to Chain.
// For the case of sret and another return value, we have
// Chain_0 at the function entry
// Chain_1 = getCopyToReg(Chain_0) in the above loop
// If we use Chain_1 in getCopyFromReg, we will have
// Val = getCopyFromReg(Chain_1)
// Chain_2 = getCopyToReg(Chain_1, Val) from below
// getCopyToReg(Chain_0) will be glued together with
// getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
// in Unit B, and we will have cyclic dependency between Unit A and Unit B:
// Data dependency from Unit B to Unit A due to usage of Val in
// getCopyToReg(Chain_1, Val)
// Chain dependency from Unit A to Unit B
// So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
getPointerTy(MF.getDataLayout()));
unsigned RetValReg
= (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
X86::RAX : X86::EAX;
Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
Flag = Chain.getValue(1);
// RAX/EAX now acts like a return value.
RetOps.push_back(
DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
// Add the returned register to the CalleeSaveDisableRegs list.
if (ShouldDisableCalleeSavedRegister)
MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
}
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *I =
TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
if (I) {
for (; *I; ++I) {
if (X86::GR64RegClass.contains(*I))
RetOps.push_back(DAG.getRegister(*I, MVT::i64));
else
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
}
}
RetOps[0] = Chain; // Update chain.
// Add the flag if we have it.
if (Flag.getNode())
RetOps.push_back(Flag);
X86ISD::NodeType opcode = X86ISD::RET_FLAG;
if (CallConv == CallingConv::X86_INTR)
opcode = X86ISD::IRET;
return DAG.getNode(opcode, dl, MVT::Other, RetOps);
}
bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
return false;
SDValue TCChain = Chain;
SDNode *Copy = *N->use_begin();
if (Copy->getOpcode() == ISD::CopyToReg) {
// If the copy has a glue operand, we conservatively assume it isn't safe to
// perform a tail call.
if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
return false;
TCChain = Copy->getOperand(0);
} else if (Copy->getOpcode() != ISD::FP_EXTEND)
return false;
bool HasRet = false;
for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
UI != UE; ++UI) {
if (UI->getOpcode() != X86ISD::RET_FLAG)
return false;
// If we are returning more than one value, we can definitely not make a
// tail call; see PR19530.
if (UI->getNumOperands() > 4)
return false;
if (UI->getNumOperands() == 4 &&
UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
return false;
HasRet = true;
}
if (!HasRet)
return false;
Chain = TCChain;
return true;
}
EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
ISD::NodeType ExtendKind) const {
MVT ReturnMVT = MVT::i32;
bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
// The ABI does not require i1, i8 or i16 to be extended.
//
// On Darwin, there is code in the wild relying on Clang's old behaviour of
// always extending i8/i16 return values, so keep doing that for now.
// (PR26665).
ReturnMVT = MVT::i8;
}
EVT MinVT = getRegisterType(Context, ReturnMVT);
return VT.bitsLT(MinVT) ? MinVT : VT;
}
/// Reads two 32-bit registers and creates a 64-bit mask value.
/// \param VA The current 32-bit value that needs to be assigned.
/// \param NextVA The next 32-bit value that needs to be assigned.
/// \param Root The parent DAG node.
/// \param [in,out] InFlag Represents an SDValue in the parent DAG node for
/// glue purposes. If the DAG is already using a
/// physical register instead of a virtual one, we
/// should glue our new SDValue to the InFlag SDValue.
/// \return a new 64-bit SDValue.
static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
SDValue &Root, SelectionDAG &DAG,
const SDLoc &Dl, const X86Subtarget &Subtarget,
SDValue *InFlag = nullptr) {
assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
assert(Subtarget.is32Bit() && "Expecting 32 bit target");
assert(VA.getValVT() == MVT::v64i1 &&
"Expecting first location of 64 bit width type");
assert(NextVA.getValVT() == VA.getValVT() &&
"The locations should have the same type");
assert(VA.isRegLoc() && NextVA.isRegLoc() &&
"The values should reside in two registers");
SDValue Lo, Hi;
unsigned Reg;
SDValue ArgValueLo, ArgValueHi;
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterClass *RC = &X86::GR32RegClass;
// Read the two 32-bit halves from the registers.
if (nullptr == InFlag) {
// When no physical register is present,
// create an intermediate virtual register.
Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
} else {
// When a physical register is available read the value from it and glue
// the reads together.
ArgValueLo =
DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
*InFlag = ArgValueLo.getValue(2);
ArgValueHi =
DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
*InFlag = ArgValueHi.getValue(2);
}
// Convert the i32 type into v32i1 type.
Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
// Convert the i32 type into v32i1 type.
Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
// Concatenate the two values together.
return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
}
/// Lowers a register value of various sizes (8/16/32/64 bits) to a mask value
/// of the expected size (v8i1/v16i1/v32i1/v64i1).
/// \returns a DAG node containing the operand after lowering to the mask type.
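/// For example, per the switch below, a v16i1 value that arrived in an i32
/// location is truncated to i16 and then bitcast back to v16i1.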
static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
const EVT &ValLoc, const SDLoc &Dl,
SelectionDAG &DAG) {
SDValue ValReturned = ValArg;
if (ValVT == MVT::v1i1)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
if (ValVT == MVT::v64i1) {
// On a 32-bit target this case is handled by getv64i1Argument.
assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
// On a 64-bit target there is no need to truncate the value, only bitcast it.
} else {
MVT maskLen;
switch (ValVT.getSimpleVT().SimpleTy) {
case MVT::v8i1:
maskLen = MVT::i8;
break;
case MVT::v16i1:
maskLen = MVT::i16;
break;
case MVT::v32i1:
maskLen = MVT::i32;
break;
default:
llvm_unreachable("Expecting a vector of i1 types");
}
ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
}
return DAG.getBitcast(ValVT, ValReturned);
}
/// Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
///
SDValue X86TargetLowering::LowerCallResult(
SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
uint32_t *RegMask) const {
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
bool Is64Bit = Subtarget.is64Bit();
CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
// Copy all of the result registers out of their specified physreg.
for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
++I, ++InsIndex) {
CCValAssign &VA = RVLocs[I];
EVT CopyVT = VA.getLocVT();
// In some calling conventions we need to remove the used registers
// from the register mask.
if (RegMask) {
for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
}
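// The mask is an array of 32-bit words with one bit per register: register
// number N occupies bit (N % 32) of word (N / 32). For instance (N = 37 is
// a made-up number, not a specific X86 register), clearing register 37
// clears bit 5 of RegMask[1]: RegMask[1] &= ~(1u << 5).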
// If this is x86-64 and SSE is disabled, we can't return FP values
if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
}
// If we prefer to use the value in xmm registers, copy it out as f80 and
// use a truncate to move it from fp stack reg to xmm reg.
bool RoundAfterCopy = false;
if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
isScalarFPTypeInSSEReg(VA.getValVT())) {
if (!Subtarget.hasX87())
report_fatal_error("X87 register return with X87 disabled");
CopyVT = MVT::f80;
RoundAfterCopy = (CopyVT != VA.getLocVT());
}
SDValue Val;
if (VA.needsCustom()) {
assert(VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
Val =
getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
} else {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
.getValue(1);
Val = Chain.getValue(0);
InFlag = Chain.getValue(2);
}
if (RoundAfterCopy)
Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
// This truncation won't change the value.
DAG.getIntPtrConstant(1, dl));
if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
if (VA.getValVT().isVector() &&
((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
(VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
// Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8.
Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
} else
Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
}
InVals.push_back(Val);
}
return Chain;
}
//===----------------------------------------------------------------------===//
// C & StdCall & Fast Calling Convention implementation
//===----------------------------------------------------------------------===//
// The StdCall calling convention is standard for many Windows API routines.
// It differs from the C calling convention only slightly: the callee cleans
// up the stack instead of the caller, and symbols are also decorated in
// some fancy way :) It doesn't support any vector arguments.
// For info on fast calling convention see Fast Calling Convention (tail call)
// implementation LowerX86_32FastCCCallTo.
/// Classifies the struct-return semantics used by a call.
enum StructReturnType {
NotStructReturn,
RegStructReturn,
StackStructReturn
};
static StructReturnType
callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
if (Outs.empty())
return NotStructReturn;
const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
if (!Flags.isSRet())
return NotStructReturn;
if (Flags.isInReg() || IsMCU)
return RegStructReturn;
return StackStructReturn;
}
/// Determines whether a function uses struct return semantics.
static StructReturnType
argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
if (Ins.empty())
return NotStructReturn;
const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
if (!Flags.isSRet())
return NotStructReturn;
if (Flags.isInReg() || IsMCU)
return RegStructReturn;
return StackStructReturn;
}
/// Make a copy of an aggregate at address specified by "Src" to address
/// "Dst" with size and alignment information specified by the specific
/// parameter attribute. The copy will be passed as a byval function parameter.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
SDValue Chain, ISD::ArgFlagsTy Flags,
SelectionDAG &DAG, const SDLoc &dl) {
SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
/*isVolatile*/false, /*AlwaysInline=*/true,
/*isTailCall*/false,
MachinePointerInfo(), MachinePointerInfo());
}
/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
CC == CallingConv::HHVM);
}
/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
switch (CC) {
// C calling conventions:
case CallingConv::C:
case CallingConv::Win64:
case CallingConv::X86_64_SysV:
// Callee pop conventions:
case CallingConv::X86_ThisCall:
case CallingConv::X86_StdCall:
case CallingConv::X86_VectorCall:
case CallingConv::X86_FastCall:
return true;
default:
return canGuaranteeTCO(CC);
}
}
/// Return true if the function is being made into a tailcall target by
/// changing its ABI.
static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
return GuaranteedTailCallOpt && canGuaranteeTCO(CC);
}
bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
auto Attr =
CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
if (!CI->isTailCall() || Attr.getValueAsString() == "true")
return false;
ImmutableCallSite CS(CI);
CallingConv::ID CalleeCC = CS.getCallingConv();
if (!mayTailCallThisCC(CalleeCC))
return false;
return true;
}
SDValue
X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
const CCValAssign &VA,
MachineFrameInfo &MFI, unsigned i) const {
// Create the nodes corresponding to a load from this parameter slot.
ISD::ArgFlagsTy Flags = Ins[i].Flags;
bool AlwaysUseMutable = shouldGuaranteeTCO(
CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
EVT ValVT;
MVT PtrVT = getPointerTy(DAG.getDataLayout());
// If the value is passed by pointer, we have the address passed instead of
// the value itself. No need to extend if the mask value and location share
// the same absolute size.
bool ExtendedInMem =
VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
ValVT = VA.getLocVT();
else
ValVT = VA.getValVT();
// Calculate the SP offset of an interrupt parameter, re-arranging the slot
// normally taken by the return address.
int Offset = 0;
if (CallConv == CallingConv::X86_INTR) {
// X86 interrupts may take one or two arguments.
// Unlike a regular call, there is no return address on the stack.
// The offset of the last argument needs to be set to -4/-8 bytes, while the
// offset of the first of two arguments should be set to 0 bytes.
Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1);
if (Subtarget.is64Bit() && Ins.size() == 2) {
// The stack pointer needs to be realigned for 64-bit handlers with error
// code, so the argument offset changes by 8 bytes.
Offset += 8;
}
}
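// A worked instance of the offsets above (illustrative): on a 32-bit
// target with two arguments, Offset = 4 * ((i + 1) % 2 - 1), i.e. 0 for
// i == 0 and -4 for i == 1; with a single argument, Offset = -4 (-8 on a
// 64-bit target). The 64-bit error-code fixup then shifts both offsets up
// by 8 bytes.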
// FIXME: For now, all byval parameter objects are marked mutable. This can be
// changed with more analysis.
// In case of tail call optimization, mark all arguments mutable, since they
// could be overwritten by the lowering of arguments in case of a tail call.
if (Flags.isByVal()) {
unsigned Bytes = Flags.getByValSize();
if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
// FIXME: For now, all byval parameter objects are marked as aliasing. This
// can be improved with deeper analysis.
int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
/*isAliased=*/true);
// Adjust SP offset of interrupt parameter.
if (CallConv == CallingConv::X86_INTR) {
MFI.setObjectOffset(FI, Offset);
}
return DAG.getFrameIndex(FI, PtrVT);
}
// This is an argument in memory. We might be able to perform copy elision.
if (Flags.isCopyElisionCandidate()) {
EVT ArgVT = Ins[i].ArgVT;
SDValue PartAddr;
if (Ins[i].PartOffset == 0) {
// If this is a one-part value or the first part of a multi-part value,
// create a stack object for the entire argument value type and return a
// load from our portion of it. This assumes that if the first part of an
// argument is in memory, the rest will also be in memory.
int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
/*Immutable=*/false);
PartAddr = DAG.getFrameIndex(FI, PtrVT);
return DAG.getLoad(
ValVT, dl, Chain, PartAddr,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
} else {
// This is not the first piece of an argument in memory. See if there is
// already a fixed stack object including this offset. If so, assume it
// was created by the PartOffset == 0 branch above and create a load from
// the appropriate offset into it.
int64_t PartBegin = VA.getLocMemOffset();
int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
int FI = MFI.getObjectIndexBegin();
for (; MFI.isFixedObjectIndex(FI); ++FI) {
int64_t ObjBegin = MFI.getObjectOffset(FI);
int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
break;
}
if (MFI.isFixedObjectIndex(FI)) {
SDValue Addr =
DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
return DAG.getLoad(
ValVT, dl, Chain, Addr,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
Ins[i].PartOffset));
}
}
}
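// A concrete example of the part search (sizes are illustrative): an i128
// argument passed in two 8-byte stack slots at offsets 0 and 8. Part 0
// creates a single 16-byte fixed object covering [0, 16); the part at
// PartOffset 8 has PartBegin = 8 and PartEnd = 16, finds that object
// because 0 <= 8 and 16 <= 16, and loads from FI + 8.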
int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
VA.getLocMemOffset(), isImmutable);
// Set SExt or ZExt flag.
if (VA.getLocInfo() == CCValAssign::ZExt) {
MFI.setObjectZExt(FI, true);
} else if (VA.getLocInfo() == CCValAssign::SExt) {
MFI.setObjectSExt(FI, true);
}
// Adjust SP offset of interrupt parameter.
if (CallConv == CallingConv::X86_INTR) {
MFI.setObjectOffset(FI, Offset);
}
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
SDValue Val = DAG.getLoad(
ValVT, dl, Chain, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
return ExtendedInMem
? (VA.getValVT().isVector()
? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
: DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
: Val;
}
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
const X86Subtarget &Subtarget) {
assert(Subtarget.is64Bit());
if (Subtarget.isCallingConvWin64(CallConv)) {
static const MCPhysReg GPR64ArgRegsWin64[] = {
X86::RCX, X86::RDX, X86::R8, X86::R9
};
return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
}
static const MCPhysReg GPR64ArgRegs64Bit[] = {
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
};
return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
}
// FIXME: Get this from tablegen.
static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
CallingConv::ID CallConv,
const X86Subtarget &Subtarget) {
assert(Subtarget.is64Bit());
if (Subtarget.isCallingConvWin64(CallConv)) {
// The XMM registers which might contain var arg parameters are shadowed
// by their paired GPRs. So we only need to save the GPRs to their home
// slots.
// TODO: __vectorcall will change this.
return None;
}
const Function &F = MF.getFunction();
bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool isSoftFloat = Subtarget.useSoftFloat();
assert(!(isSoftFloat && NoImplicitFloatOps) &&
"SSE register cannot be used when SSE is disabled!");
if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
// Kernel mode asks for SSE to be disabled, so there are no XMM argument
// registers.
return None;
static const MCPhysReg XMMArgRegs64Bit[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
}
#ifndef NDEBUG
static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
[](const CCValAssign &A, const CCValAssign &B) -> bool {
return A.getValNo() < B.getValNo();
});
}
#endif
SDValue X86TargetLowering::LowerFormalArguments(
SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
const Function &F = MF.getFunction();
if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
F.getName() == "main")
FuncInfo->setForceFramePointer(true);
MachineFrameInfo &MFI = MF.getFrameInfo();
bool Is64Bit = Subtarget.is64Bit();
bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
assert(
!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
if (CallConv == CallingConv::X86_INTR) {
bool isLegal = Ins.size() == 1 ||
(Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) ||
(!Is64Bit && Ins[1].VT == MVT::i32)));
if (!isLegal)
report_fatal_error("X86 interrupts may take one or two arguments");
}
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64.
if (IsWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeArguments(Ins, CC_X86);
// In the vectorcall calling convention, a second pass is required for the
// HVA (homogeneous vector aggregate) types.
if (CallingConv::X86_VectorCall == CallConv) {
CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
}
// The next loop assumes that the locations are in the same order as the
// input arguments.
assert(isSortedByValueNo(ArgLocs) &&
"Argument Location list must be sorted before lowering");
SDValue ArgValue;
for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
++I, ++InsIndex) {
assert(InsIndex < Ins.size() && "Invalid Ins index");
CCValAssign &VA = ArgLocs[I];
if (VA.isRegLoc()) {
EVT RegVT = VA.getLocVT();
if (VA.needsCustom()) {
assert(
VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
// In the regcall calling convention, v64i1 values compiled for a
// 32-bit architecture are split up into two registers.
ArgValue =
getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
} else {
const TargetRegisterClass *RC;
if (RegVT == MVT::i8)
RC = &X86::GR8RegClass;
else if (RegVT == MVT::i16)
RC = &X86::GR16RegClass;
else if (RegVT == MVT::i32)
RC = &X86::GR32RegClass;
else if (Is64Bit && RegVT == MVT::i64)
RC = &X86::GR64RegClass;
else if (RegVT == MVT::f32)
RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
else if (RegVT == MVT::f64)
RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
else if (RegVT == MVT::f80)
RC = &X86::RFP80RegClass;
else if (RegVT == MVT::f128)
RC = &X86::VR128RegClass;
else if (RegVT.is512BitVector())
RC = &X86::VR512RegClass;
else if (RegVT.is256BitVector())
RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
else if (RegVT.is128BitVector())
RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
else if (RegVT == MVT::x86mmx)
RC = &X86::VR64RegClass;
else if (RegVT == MVT::v1i1)
RC = &X86::VK1RegClass;
else if (RegVT == MVT::v8i1)
RC = &X86::VK8RegClass;
else if (RegVT == MVT::v16i1)
RC = &X86::VK16RegClass;
else if (RegVT == MVT::v32i1)
RC = &X86::VK32RegClass;
else if (RegVT == MVT::v64i1)
RC = &X86::VK64RegClass;
else
llvm_unreachable("Unknown argument type!");
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
}
// If this is an 8 or 16-bit value, it is really passed promoted to 32
// bits. Insert an assert[sz]ext to capture this, then truncate to the
// right size.
if (VA.getLocInfo() == CCValAssign::SExt)
ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::ZExt)
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::BCvt)
ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
if (VA.isExtInLoc()) {
// Handle MMX values passed in XMM regs.
if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
else if (VA.getValVT().isVector() &&
VA.getValVT().getScalarType() == MVT::i1 &&
((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
(VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
// Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
} else
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
}
} else {
assert(VA.isMemLoc());
ArgValue =
LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
}
// If the value is passed via a pointer, do a load.
if (VA.getLocInfo() == CCValAssign::Indirect)
ArgValue =
DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
InVals.push_back(ArgValue);
}
for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
// The Swift calling convention does not require that we copy the sret
// argument into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
if (CallConv == CallingConv::Swift)
continue;
// All x86 ABIs require that for returning structs by value we copy the
// sret argument into %rax/%eax (depending on ABI) for the return. Save
// the argument into a virtual register so that we can access it from the
// return points.
if (Ins[I].Flags.isSRet()) {
unsigned Reg = FuncInfo->getSRetReturnReg();
if (!Reg) {
MVT PtrTy = getPointerTy(DAG.getDataLayout());
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
FuncInfo->setSRetReturnReg(Reg);
}
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
break;
}
}
unsigned StackSize = CCInfo.getNextStackOffset();
// Align stack specially for tail calls.
if (shouldGuaranteeTCO(CallConv,
MF.getTarget().Options.GuaranteedTailCallOpt))
StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
// If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start. We
// can skip this if there are no va_start calls.
if (MFI.hasVAStart() &&
(Is64Bit || (CallConv != CallingConv::X86_FastCall &&
CallConv != CallingConv::X86_ThisCall))) {
FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
}
// Figure out if XMM registers are in use.
assert(!(Subtarget.useSoftFloat() &&
F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
"SSE register cannot be used when SSE is disabled!");
// 64-bit calling conventions support varargs and register parameters, so we
// have to do extra work to spill them in the prologue.
if (Is64Bit && isVarArg && MFI.hasVAStart()) {
// Find the first unallocated argument register in each class.
ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
"SSE register cannot be used when SSE is disabled!");
// Gather all the live-in physical registers.
SmallVector<SDValue, 6> LiveGPRs;
SmallVector<SDValue, 8> LiveXMMRegs;
SDValue ALVal;
for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
LiveGPRs.push_back(
DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
}
if (!ArgXMMs.empty()) {
unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
LiveXMMRegs.push_back(
DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
}
}
if (IsWin64) {
// Get to the caller-allocated home save location. Add 8 to account
// for the return address.
int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
FuncInfo->setRegSaveFrameIndex(
MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
// Fixup to set vararg frame on shadow area (4 x i64).
if (NumIntRegs < 4)
FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
} else {
// For X86-64, if there are vararg parameters that are passed via
// registers, then we must store them to their spots on the stack so
// they may be loaded by dereferencing the result of va_next.
FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
}
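// Worked numbers for the SysV branch (the counts are illustrative): with
// NumIntRegs = 2 and NumXMMRegs = 1 consumed by named arguments,
// VarArgsGPOffset = 16, VarArgsFPOffset = 6 * 8 + 1 * 16 = 64, and the
// register save area is 6 * 8 + 8 * 16 = 176 bytes.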
// Store the integer parameter registers.
SmallVector<SDValue, 8> MemOps;
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
getPointerTy(DAG.getDataLayout()));
unsigned Offset = FuncInfo->getVarArgsGPOffset();
for (SDValue Val : LiveGPRs) {
SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
RSFIN, DAG.getIntPtrConstant(Offset, dl));
SDValue Store =
DAG.getStore(Val.getValue(1), dl, Val, FIN,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(),
FuncInfo->getRegSaveFrameIndex(), Offset));
MemOps.push_back(Store);
Offset += 8;
}
if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
// Now store the XMM (fp + vector) parameter registers.
SmallVector<SDValue, 12> SaveXMMOps;
SaveXMMOps.push_back(Chain);
SaveXMMOps.push_back(ALVal);
SaveXMMOps.push_back(DAG.getIntPtrConstant(
FuncInfo->getRegSaveFrameIndex(), dl));
SaveXMMOps.push_back(DAG.getIntPtrConstant(
FuncInfo->getVarArgsFPOffset(), dl));
SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
LiveXMMRegs.end());
MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
MVT::Other, SaveXMMOps));
}
if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
}
if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
// Find the largest legal vector type.
MVT VecVT = MVT::Other;
// FIXME: Only some x86_32 calling conventions support AVX512.
if (Subtarget.hasAVX512() &&
(Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
CallConv == CallingConv::Intel_OCL_BI)))
VecVT = MVT::v16f32;
else if (Subtarget.hasAVX())
VecVT = MVT::v8f32;
else if (Subtarget.hasSSE2())
VecVT = MVT::v4f32;
// We forward some GPRs and some vector types.
SmallVector<MVT, 2> RegParmTypes;
MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
RegParmTypes.push_back(IntVT);
if (VecVT != MVT::Other)
RegParmTypes.push_back(VecVT);
// Compute the set of forwarded registers. The rest are scratch.
SmallVectorImpl<ForwardedRegister> &Forwards =
FuncInfo->getForwardedMustTailRegParms();
CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
// Conservatively forward AL on x86_64, since it might be used for varargs.
if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
}
// Copy all forwards from physical to virtual registers.
for (ForwardedRegister &F : Forwards) {
// FIXME: Can we use a less constrained schedule?
SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
}
}
// Some CCs need callee pop.
if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
MF.getTarget().Options.GuaranteedTailCallOpt)) {
FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
} else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
// X86 interrupts must pop the error code (and the alignment padding) if
// present.
FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
} else {
FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
!Subtarget.getTargetTriple().isOSMSVCRT() &&
argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
FuncInfo->setBytesToPopOnReturn(4);
}
if (!Is64Bit) {
// RegSaveFrameIndex is X86-64 only.
FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
if (CallConv == CallingConv::X86_FastCall ||
CallConv == CallingConv::X86_ThisCall)
// fastcc functions can't have varargs.
FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
}
FuncInfo->setArgumentStackSize(StackSize);
if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
if (Personality == EHPersonality::CoreCLR) {
assert(Is64Bit);
// TODO: Add a mechanism to frame lowering that will allow us to indicate
// that we'd prefer this slot be allocated towards the bottom of the frame
// (i.e. near the stack pointer after allocating the frame). Every
// funclet needs a copy of this slot in its (mostly empty) frame, and the
// offset from the bottom of this and each funclet's frame must be the
// same, so the size of funclets' (mostly empty) frames is dictated by
// how far this slot is from the bottom (since they allocate just enough
// space to accommodate holding this slot at the correct offset).
int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
EHInfo->PSPSymFrameIdx = PSPSymFI;
}
}
if (CallConv == CallingConv::X86_RegCall ||
F.hasFnAttribute("no_caller_saved_registers")) {
MachineRegisterInfo &MRI = MF.getRegInfo();
for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
MRI.disableCalleeSavedRegister(Pair.first);
}
return Chain;
}
SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
SDValue Arg, const SDLoc &dl,
SelectionDAG &DAG,
const CCValAssign &VA,
ISD::ArgFlagsTy Flags) const {
unsigned LocMemOffset = VA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
StackPtr, PtrOff);
if (Flags.isByVal())
return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
return DAG.getStore(
Chain, dl, Arg, PtrOff,
MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
}
/// Emit a load of the return address if tail call
/// optimization is performed and it is required.
SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
bool Is64Bit, int FPDiff, const SDLoc &dl) const {
// Adjust the Return address stack slot.
EVT VT = getPointerTy(DAG.getDataLayout());
OutRetAddr = getReturnAddressFrameIndex(DAG);
// Load the "old" Return address.
OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
return SDValue(OutRetAddr.getNode(), 1);
}
/// Emit a store of the return address if tail call
/// optimization is performed and it is required (FPDiff!=0).
static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
SDValue Chain, SDValue RetAddrFrIdx,
EVT PtrVT, unsigned SlotSize,
int FPDiff, const SDLoc &dl) {
// Store the return address to the appropriate stack slot.
if (!FPDiff) return Chain;
// Calculate the new stack slot for the return address.
int NewReturnAddrFI =
MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
false);
SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
MachinePointerInfo::getFixedStack(
DAG.getMachineFunction(), NewReturnAddrFI));
return Chain;
}
/// Returns a vector_shuffle node for a movs{s|d} or movd
/// operation of the specified width.
static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
SDValue V2) {
unsigned NumElems = VT.getVectorNumElements();
SmallVector<int, 8> Mask;
Mask.push_back(NumElems);
for (unsigned i = 1; i != NumElems; ++i)
Mask.push_back(i);
return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
SDValue
X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const {
SelectionDAG &DAG = CLI.DAG;
SDLoc &dl = CLI.DL;
SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
SDValue Chain = CLI.Chain;
SDValue Callee = CLI.Callee;
CallingConv::ID CallConv = CLI.CallConv;
bool &isTailCall = CLI.IsTailCall;
bool isVarArg = CLI.IsVarArg;
MachineFunction &MF = DAG.getMachineFunction();
bool Is64Bit = Subtarget.is64Bit();
bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
bool IsSibcall = false;
X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
(Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
bool HasNoCfCheck =
(CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
const Module *M = MF.getMMI().getModule();
Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
if (CallConv == CallingConv::X86_INTR)
report_fatal_error("X86 interrupts may not be called directly");
if (Attr.getValueAsString() == "true")
isTailCall = false;
if (Subtarget.isPICStyleGOT() &&
!MF.getTarget().Options.GuaranteedTailCallOpt) {
// If we are using a GOT, disable tail calls to external symbols with
// default visibility. Tail calling such a symbol requires using a GOT
// relocation, which forces early binding of the symbol. This breaks code
// that requires lazy function symbol resolution. Using musttail or
// GuaranteedTailCallOpt will override this.
GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
if (!G || (!G->getGlobal()->hasLocalLinkage() &&
G->getGlobal()->hasDefaultVisibility()))
isTailCall = false;
}
bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
if (IsMustTail) {
// Force this to be a tail call. The verifier rules are enough to ensure
// that we can lower this successfully without moving the return address
// around.
isTailCall = true;
} else if (isTailCall) {
// Check if it's really possible to do a tail call.
isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
isVarArg, SR != NotStructReturn,
MF.getFunction().hasStructRetAttr(), CLI.RetTy,
Outs, OutVals, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
IsSibcall = true;
if (isTailCall)
++NumTailCalls;
}
assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
"Var args not supported with calling convention fastcc, ghc or hipe");
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
// Allocate shadow area for Win64.
if (IsWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeArguments(Outs, CC_X86);
// In the vectorcall calling convention, a second pass is required for the
// HVA (homogeneous vector aggregate) types.
if (CallingConv::X86_VectorCall == CallConv) {
CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
}
// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
if (IsSibcall)
// This is a sibcall. The memory operands are already available in the
// caller's incoming argument area (its own caller's stack).
NumBytes = 0;
else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
canGuaranteeTCO(CallConv))
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
if (isTailCall && !IsSibcall && !IsMustTail) {
// Lower arguments at fp - stackoffset + fpdiff.
unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
FPDiff = NumBytesCallerPushed - NumBytes;
// Record the delta by which the return address stack slot moves; keep
// the most negative (largest-magnitude) delta seen so far.
if (FPDiff < X86Info->getTCReturnAddrDelta())
X86Info->setTCReturnAddrDelta(FPDiff);
}
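// For example (the byte counts are hypothetical): if the caller pops 16
// bytes on return (NumBytesCallerPushed = 16) but this call needs
// NumBytes = 24, then FPDiff = 16 - 24 = -8, and the return address slot
// has to move 8 bytes to make room for the extra arguments.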
unsigned NumBytesToPush = NumBytes;
unsigned NumBytesToPop = NumBytes;
// If we have an inalloca argument, all stack space has already been allocated
// for us and is right at the top of the stack. We don't support multiple
// arguments passed in memory when using inalloca.
if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
NumBytesToPush = 0;
if (!ArgLocs.back().isMemLoc())
report_fatal_error("cannot use inalloca attribute on a register "
"parameter");
if (ArgLocs.back().getLocMemOffset() != 0)
report_fatal_error("any parameter with the inalloca attribute must be "
"the only memory argument");
}
if (!IsSibcall)
Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
NumBytes - NumBytesToPush, dl);
SDValue RetAddrFrIdx;
// Load return address for tail calls.
if (isTailCall && FPDiff)
Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
Is64Bit, FPDiff, dl);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;
SDValue StackPtr;
// The next loop assumes that the locations are in the same order as the
// input arguments.
assert(isSortedByValueNo(ArgLocs) &&
"Argument Location list must be sorted before lowering");
// Walk the register/memloc assignments, inserting copies/loads. In the case
// of tail call optimization, arguments are handled later.
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
++I, ++OutIndex) {
assert(OutIndex < Outs.size() && "Invalid Out index");
// Skip inalloca arguments, they have already been written.
ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
if (Flags.isInAlloca())
continue;
CCValAssign &VA = ArgLocs[I];
EVT RegVT = VA.getLocVT();
SDValue Arg = OutVals[OutIndex];
bool isByVal = Flags.isByVal();
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break;
case CCValAssign::SExt:
Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::ZExt:
Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::AExt:
if (Arg.getValueType().isVector() &&
Arg.getValueType().getVectorElementType() == MVT::i1)
Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
else if (RegVT.is128BitVector()) {
// Special case: passing MMX values in XMM registers.
Arg = DAG.getBitcast(MVT::i64, Arg);
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
} else
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
break;
case CCValAssign::BCvt:
Arg = DAG.getBitcast(RegVT, Arg);
break;
case CCValAssign::Indirect: {
// Store the argument.
SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
Chain = DAG.getStore(
Chain, dl, Arg, SpillSlot,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
Arg = SpillSlot;
break;
}
}
if (VA.needsCustom()) {
assert(VA.getValVT() == MVT::v64i1 &&
"Currently the only custom case is when we split v64i1 to 2 regs");
// Split v64i1 value into two registers
Passv64i1ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++I],
Subtarget);
} else if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
if (isVarArg && IsWin64) {
// The Win64 ABI requires an argument passed in an XMM register to be
// copied to the corresponding shadow GPR if the callee is a varargs
// function.
unsigned ShadowReg = 0;
switch (VA.getLocReg()) {
case X86::XMM0: ShadowReg = X86::RCX; break;
case X86::XMM1: ShadowReg = X86::RDX; break;
case X86::XMM2: ShadowReg = X86::R8; break;
case X86::XMM3: ShadowReg = X86::R9; break;
}
if (ShadowReg)
RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
}
} else if (!IsSibcall && (!isTailCall || isByVal)) {
assert(VA.isMemLoc());
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
getPointerTy(DAG.getDataLayout()));
MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
dl, DAG, VA, Flags));
}
}
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
if (Subtarget.isPICStyleGOT()) {
// ELF / PIC requires the GOT pointer to be in the EBX register before
// function calls made via the PLT.
if (!isTailCall) {
RegsToPass.push_back(std::make_pair(
unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
getPointerTy(DAG.getDataLayout()))));
} else {
// If we are tail calling and generating PIC/GOT style code load the
// address of the callee into ECX. The value in ecx is used as target of
// the tail jump. This is done to circumvent the ebx/callee-saved problem
// for tail calls on PIC/GOT architectures. Normally we would just put the
// address of GOT into ebx and then call target@PLT. But for tail calls
// ebx would be restored (since ebx is callee saved) before jumping to the
// target@PLT.
// Note: The actual moving to ECX is done further down.
GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
if (G && !G->getGlobal()->hasLocalLinkage() &&
G->getGlobal()->hasDefaultVisibility())
Callee = LowerGlobalAddress(Callee, DAG);
else if (isa<ExternalSymbolSDNode>(Callee))
Callee = LowerExternalSymbol(Callee, DAG);
}
}
if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
// From AMD64 ABI document:
// For calls that may call functions that use varargs or stdargs
// (prototype-less calls or calls to functions containing ellipsis (...) in
// the declaration) %al is used as hidden argument to specify the number
// of SSE registers used. The contents of %al do not need to match exactly
// the number of registers, but must be an upper bound on the number of SSE
// registers used and is in the range 0 - 8 inclusive.
// Count the number of XMM registers allocated.
static const MCPhysReg XMMArgRegs[] = {
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
};
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
assert((Subtarget.hasSSE1() || !NumXMMRegs)
&& "SSE registers cannot be used when SSE is disabled");
RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
DAG.getConstant(NumXMMRegs, dl,
MVT::i8)));
}
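// As an illustration, a call like printf("%f %f", a, b) passes the two
// doubles in XMM0/XMM1, so NumXMMRegs = 2 and AL is set to 2. Any upper
// bound in the range 0-8 would satisfy the ABI; the allocator count is
// simply the tightest bound we know.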
if (isVarArg && IsMustTail) {
const auto &Forwards = X86Info->getForwardedMustTailRegParms();
for (const auto &F : Forwards) {
SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
}
}
// For tail calls lower the arguments to the 'real' stack slots. Sibcalls
// don't need this because the eligibility check rejects calls that require
// shuffling arguments passed in memory.
if (!IsSibcall && isTailCall) {
// Force all the incoming stack arguments to be loaded from the stack
// before any new outgoing arguments are stored to the stack, because the
// outgoing stack slots may alias the incoming argument stack slots, and
// the alias isn't otherwise explicit. This is slightly more conservative
// than necessary, because it means that each store effectively depends
// on every argument instead of just those arguments it would clobber.
SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
SmallVector<SDValue, 8> MemOpChains2;
SDValue FIN;
int FI = 0;
for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
++I, ++OutsIndex) {
CCValAssign &VA = ArgLocs[I];
if (VA.isRegLoc()) {
if (VA.needsCustom()) {
assert((CallConv == CallingConv::X86_RegCall) &&
"Expecting custom case only in regcall calling convention");
// This means that we are in a special case where one argument was
// passed through two register locations; skip the next location.
++I;
}
continue;
}
assert(VA.isMemLoc());
SDValue Arg = OutVals[OutsIndex];
ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
// Skip inalloca arguments. They don't require any work.
if (Flags.isInAlloca())
continue;
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
if (Flags.isByVal()) {
// Copy relative to framepointer.
SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
if (!StackPtr.getNode())
StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
getPointerTy(DAG.getDataLayout()));
Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
StackPtr, Source);
MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
ArgChain,
Flags, DAG, dl));
} else {
// Store relative to framepointer.
MemOpChains2.push_back(DAG.getStore(
ArgChain, dl, Arg, FIN,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
}
}
if (!MemOpChains2.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
// Store the return address to the appropriate stack slot.
Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
getPointerTy(DAG.getDataLayout()),
RegInfo->getSlotSize(), FPDiff, dl);
}
// Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into registers.
SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1);
}
if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
// In the 64-bit large code model, we have to make all calls
// through a register, since the call instruction's 32-bit
// pc-relative offset may not be large enough to hold the whole
// address.
} else if (Callee->getOpcode() == ISD::GlobalAddress) {
// If the callee is a GlobalAddress node (quite common, every direct call
// is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
// it.
GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
// We should use an extra load for direct calls to dllimported functions in
// non-JIT mode.
const GlobalValue *GV = G->getGlobal();
if (!GV->hasDLLImportStorageClass()) {
unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV);
Callee = DAG.getTargetGlobalAddress(
GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags);
if (OpFlags == X86II::MO_GOTPCREL) {
// Add a wrapper.
Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
getPointerTy(DAG.getDataLayout()), Callee);
// Add extra indirection
Callee = DAG.getLoad(
getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlags =
Subtarget.classifyGlobalFunctionReference(nullptr, *Mod);
Callee = DAG.getTargetExternalSymbol(
S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags);
if (OpFlags == X86II::MO_GOTPCREL) {
Callee = DAG.getNode(X86ISD::WrapperRIP, dl,
getPointerTy(DAG.getDataLayout()), Callee);
Callee = DAG.getLoad(
getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
} else if (Subtarget.isTarget64BitILP32() &&
Callee->getValueType(0) == MVT::i32) {
// Zero-extend the 32-bit Callee address into 64 bits according to the x32 ABI.
Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
}
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SmallVector<SDValue, 8> Ops;
if (!IsSibcall && isTailCall) {
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getIntPtrConstant(NumBytesToPop, dl, true),
DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
InFlag = Chain.getValue(1);
}
Ops.push_back(Chain);
Ops.push_back(Callee);
if (isTailCall)
Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
// Add argument registers to the end of the list so that they are known live
// into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
RegsToPass[i].second.getValueType()));
// Add a register mask operand representing the call-preserved registers.
// If HasNCSR is asserted (the no_caller_saved_registers attribute is
// present), we use the X86_INTR calling convention because it has the
// same CSR mask (same preserved registers).
const uint32_t *Mask = RegInfo->getCallPreservedMask(
MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
assert(Mask && "Missing call preserved mask for calling convention");
// If this is an invoke in a 32-bit function using a funclet-based
// personality, assume the function clobbers all registers. If an exception
// is thrown, the runtime will not restore CSRs.
// FIXME: Model this more precisely so that we can register allocate across
// the normal edge and spill and fill across the exceptional edge.
if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
const Function &CallerFn = MF.getFunction();
EHPersonality Pers =
CallerFn.hasPersonalityFn()
? classifyEHPersonality(CallerFn.getPersonalityFn())
: EHPersonality::Unknown;
if (isFuncletEHPersonality(Pers))
Mask = RegInfo->getNoPreservedMask();
}
// Define a new register mask from the existing mask.
uint32_t *RegMask = nullptr;
// In some calling conventions we need to remove the used physical registers
// from the reg mask.
if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
// Allocate a new Reg Mask and copy Mask.
RegMask = MF.allocateRegMask();
unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
// Make sure all sub registers of the argument registers are reset
// in the RegMask.
for (auto const &RegPair : RegsToPass)
for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
SubRegs.isValid(); ++SubRegs)
RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
// Create the RegMask Operand according to our updated mask.
Ops.push_back(DAG.getRegisterMask(RegMask));
} else {
// Create the RegMask Operand according to the static mask.
Ops.push_back(DAG.getRegisterMask(Mask));
}
if (InFlag.getNode())
Ops.push_back(InFlag);
if (isTailCall) {
// We used to do:
//// If this is the first return lowered for this function, add the regs
//// to the liveout set for the function.
// This isn't right, although it's probably harmless on x86; liveouts
// should be computed from returns not tail calls. Consider a void
// function making a tail call to a function returning int.
MF.getFrameInfo().setHasTailCall();
return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
}
if (HasNoCfCheck && IsCFProtectionSupported) {
Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
} else {
Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
}
InFlag = Chain.getValue(1);
// Create the CALLSEQ_END node.
unsigned NumBytesForCalleeToPop;
if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
DAG.getTarget().Options.GuaranteedTailCallOpt))
NumBytesForCalleeToPop = NumBytes; // Callee pops everything
else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
!Subtarget.getTargetTriple().isOSMSVCRT() &&
SR == StackStructReturn)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
// For MSVC Win32 targets, the caller pops the hidden struct pointer.
NumBytesForCalleeToPop = 4;
else
NumBytesForCalleeToPop = 0; // Callee pops nothing.
if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
// No need to reset the stack after the call if the call doesn't return. To
// make the MI verifier happy, we'll pretend the callee does it for us.
NumBytesForCalleeToPop = NumBytes;
}
// Returns a flag for retval copy to use.
if (!IsSibcall) {
Chain = DAG.getCALLSEQ_END(Chain,
DAG.getIntPtrConstant(NumBytesToPop, dl, true),
DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
true),
InFlag, dl);
InFlag = Chain.getValue(1);
}
// Handle result values, copying them out of physregs into vregs that we
// return.
return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
InVals, RegMask);
}
//===----------------------------------------------------------------------===//
// Fast Calling Convention (tail call) implementation
//===----------------------------------------------------------------------===//
// Like stdcall, the callee cleans up the arguments, except that ECX is
// reserved for storing the address of the tail-called function. Only 2
// registers are free for argument passing (inreg). Tail call optimization
// is performed provided:
// * tailcallopt is enabled
// * caller/callee are fastcc
// On X86_64 architecture with GOT-style position independent code only local
// (within module) calls are supported at the moment.
// To keep the stack aligned according to the platform ABI, the function
// GetAlignedArgumentStackSize ensures that the argument delta is always a
// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld
// for example)
// If a tail called function callee has more arguments than the caller the
// caller needs to make sure that there is room to move the RETADDR to. This is
// achieved by reserving an area the size of the argument delta right after the
// original RETADDR, but before the saved framepointer or the spilled registers
// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
// stack layout:
// arg1
// arg2
// RETADDR
// [ new RETADDR
// move area ]
// (possible EBP)
// ESI
// EDI
// local1 ..
/// Align the stack size so that, together with the slot for the return
/// address, it satisfies the stack alignment requirement, e.g. 16n + 12 for
/// a 16-byte alignment with 4-byte slots.
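/// For instance, assuming a 16-byte stack alignment and a 4-byte slot
/// (hypothetical 32-bit numbers): StackSize = 20 gives 20 + (12 - 4) = 28 =
/// 16 + 12, and StackSize = 30 gives (30 & ~15) + 16 + 12 = 44 = 32 + 12.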
unsigned
X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
SelectionDAG& DAG) const {
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
uint64_t AlignMask = StackAlignment - 1;
int64_t Offset = StackSize;
unsigned SlotSize = RegInfo->getSlotSize();
if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
// The misalignment is at most (StackAlignment - SlotSize), e.g. 12 bytes,
// so just add the difference.
Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
} else {
// Mask out the lower bits, then add one full stack alignment plus
// (StackAlignment - SlotSize), e.g. 12, bytes.
Offset = ((~AlignMask) & Offset) + StackAlignment +
(StackAlignment-SlotSize);
}
return Offset;
}
/// Return true if the given stack call argument is already available in the
/// same relative position of the caller's incoming argument stack.
static
bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
const X86InstrInfo *TII, const CCValAssign &VA) {
unsigned Bytes = Arg.getValueSizeInBits() / 8;
for (;;) {
// Look through nodes that don't alter the bits of the incoming value.
unsigned Op = Arg.getOpcode();
if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
Arg = Arg.getOperand(0);
continue;
}
if (Op == ISD::TRUNCATE) {
const SDValue &TruncInput = Arg.getOperand(0);
if (TruncInput.getOpcode() == ISD::AssertZext &&
cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
Arg.getValueType()) {
Arg = TruncInput.getOperand(0);
continue;
}
}
break;
}
int FI = INT_MAX;
if (Arg.getOpcode() == ISD::CopyFromReg) {
unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
if (!TargetRegisterInfo::isVirtualRegister(VR))
return false;
MachineInstr *Def = MRI->getVRegDef(VR);
if (!Def)
return false;
if (!Flags.isByVal()) {
if (!TII->isLoadFromStackSlot(*Def, FI))
return false;
} else {
unsigned Opcode = Def->getOpcode();
if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
Opcode == X86::LEA64_32r) &&
Def->getOperand(1).isFI()) {
FI = Def->getOperand(1).getIndex();
Bytes = Flags.getByValSize();
} else
return false;
}
} else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
if (Flags.isByVal())
// A byval argument is passed in as a pointer but it's now being
// dereferenced, e.g.:
// define @foo(%struct.X* %A) {
// tail call @bar(%struct.X* byval %A)
// }
return false;
SDValue Ptr = Ld->getBasePtr();
FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
if (!FINode)
return false;
FI = FINode->getIndex();
} else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
FI = FINode->getIndex();
Bytes = Flags.getByValSize();
} else
return false;
assert(FI != INT_MAX);
if (!MFI.isFixedObjectIndex(FI))
return false;
if (Offset != MFI.getObjectOffset(FI))
return false;
// If this is not byval, check that the argument stack object is immutable.
// inalloca and argument copy elision can create mutable argument stack
// objects. Byval objects can be mutated, but a byval call intends to pass the
// mutated memory.
if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
return false;
if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
// If the argument location is wider than the argument type, check that any
// extension flags match.
if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
Flags.isSExt() != MFI.isObjectSExt(FI)) {
return false;
}
}
return Bytes == MFI.getObjectSize(FI);
}
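// Example of a query that MatchingStackOffset accepts (the values are
// hypothetical): an incoming i32 argument sits in an immutable fixed object
// at offset 8 with size 4; the outgoing argument is a load from that slot,
// is passed at LocMemOffset 8 with matching extension flags, and has
// Bytes == 4 == getObjectSize(FI), so the caller may reuse the slot as-is.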
/// Check whether the call is eligible for tail call optimization. Targets
/// that want to do tail call optimization should implement this function.
bool X86TargetLowering::IsEligibleForTailCallOptimization(
SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
if (!mayTailCallThisCC(CalleeCC))
return false;
// If -tailcallopt is specified, make fastcc functions tail-callable.
MachineFunction &MF = DAG.getMachineFunction();
const Function &CallerF = MF.getFunction();
// If the function return type is x86_fp80 and the callee return type is not,
// then the FP_EXTEND of the call result is not a nop. It's not safe to
// perform a tailcall optimization here.
if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
return false;
CallingConv::ID CallerCC = CallerF.getCallingConv();
bool CCMatch = CallerCC == CalleeCC;
bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
// Win64 functions have extra shadow space for argument homing. Don't do the
// sibcall if the caller and callee have mismatched expectations for this
// space.
if (IsCalleeWin64 != IsCallerWin64)
return false;
if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
if (canGuaranteeTCO(CalleeCC) && CCMatch)
return true;
return false;
}
// Look for obvious safe cases to perform tail call optimization that do not
// require ABI changes. This is what gcc calls sibcall.
// Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
// emit a special epilogue.
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
if (RegInfo->needsStackRealignment(MF))
return false;
// Also avoid sibcall optimization if either caller or callee uses struct
// return semantics.
if (isCalleeStructRet || isCallerStructRet)
return false;
// Do not sibcall optimize vararg calls unless all arguments are passed via
// registers.
LLVMContext &C = *DAG.getContext();
if (isVarArg && !Outs.empty()) {
// Optimizing for varargs on Win64 is unlikely to be safe without
// additional testing.
if (IsCalleeWin64 || IsCallerWin64)
return false;
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
if (!ArgLocs[i].isRegLoc())
return false;
}
// If the call result is in ST0 / ST1, it needs to be popped off the x87
// stack. Therefore, if it's not used by the call it is not safe to optimize
// this into a sibcall.
bool Unused = false;
for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
if (!Ins[i].Used) {
Unused = true;
break;
}
}
if (Unused) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
CCValAssign &VA = RVLocs[i];
if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
return false;
}
}
// Check that the call results are passed in the same way.
if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
RetCC_X86, RetCC_X86))
return false;
// The callee has to preserve all registers the caller needs to preserve.
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
if (!CCMatch) {
const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
return false;
}
unsigned StackArgsSize = 0;
// If the callee takes no arguments then go on to check the results of the
// call.
if (!Outs.empty()) {
// Check if stack adjustment is needed. For now, do not do this if any
// argument is passed on the stack.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
// Allocate shadow area for Win64
if (IsCalleeWin64)
CCInfo.AllocateStack(32, 8);
CCInfo.AnalyzeCallOperands(Outs, CC_X86);
StackArgsSize = CCInfo.getNextStackOffset();
if (CCInfo.getNextStackOffset()) {
// Check if the arguments are already laid out in the right way as
// the caller's fixed stack objects.
MachineFrameInfo &MFI = MF.getFrameInfo();
const MachineRegisterInfo *MRI = &MF.getRegInfo();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (VA.getLocInfo() == CCValAssign::Indirect)
return false;
if (!VA.isRegLoc()) {
if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
MFI, MRI, TII, VA))
return false;
}
}
}
bool PositionIndependent = isPositionIndependent();
// If the tailcall address may be in a register, then make sure it's
// possible to register allocate for it. In 32-bit, the call address can
// only target EAX, EDX, or ECX since the tail call must be scheduled after
// callee-saved registers are restored. These happen to be the same
// registers used to pass 'inreg' arguments so watch out for those.
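    // For example (illustrative), a 32-bit PIC sibcall with two 'inreg'
    // arguments already in ECX and EDX leaves no free register of the three
    // to hold the call target, so the transformation must be rejected.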
if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
!isa<ExternalSymbolSDNode>(Callee)) ||
PositionIndependent)) {
unsigned NumInRegs = 0;
// In PIC we need an extra register to formulate the address computation
// for the callee.
unsigned MaxInRegs = PositionIndependent ? 2 : 3;
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i];
if (!VA.isRegLoc())
continue;
unsigned Reg = VA.getLocReg();
switch (Reg) {
default: break;
case X86::EAX: case X86::EDX: case X86::ECX:
if (++NumInRegs == MaxInRegs)
return false;
break;
}
}
}
const MachineRegisterInfo &MRI = MF.getRegInfo();
if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
return false;
}
bool CalleeWillPop =
X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
MF.getTarget().Options.GuaranteedTailCallOpt);
if (unsigned BytesToPop =
MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
// If we have bytes to pop, the callee must pop them.
bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
if (!CalleePopMatches)
return false;
} else if (CalleeWillPop && StackArgsSize > 0) {
// If we don't have bytes to pop, make sure the callee doesn't pop any.
return false;
}
return true;
}
FastISel *
X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const {
return X86::createFastISel(funcInfo, libInfo);
}
//===----------------------------------------------------------------------===//
// Other Lowering Hooks
//===----------------------------------------------------------------------===//
static bool MayFoldLoad(SDValue Op) {
return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
}
static bool MayFoldIntoStore(SDValue Op) {
return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
}
static bool MayFoldIntoZeroExtend(SDValue Op) {
if (Op.hasOneUse()) {
unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
return (ISD::ZERO_EXTEND == Opcode);
}
return false;
}
static bool isTargetShuffle(unsigned Opcode) {
switch(Opcode) {
default: return false;
case X86ISD::BLENDI:
case X86ISD::PSHUFB:
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
case X86ISD::SHUFP:
case X86ISD::INSERTPS:
case X86ISD::EXTRQI:
case X86ISD::INSERTQI:
case X86ISD::PALIGNR:
case X86ISD::VSHLDQ:
case X86ISD::VSRLDQ:
case X86ISD::MOVLHPS:
case X86ISD::MOVHLPS:
case X86ISD::MOVSHDUP:
case X86ISD::MOVSLDUP:
case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
case X86ISD::VBROADCAST:
case X86ISD::VPERMILPI:
case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
case X86ISD::SHUF128:
case X86ISD::VPERMIL2:
case X86ISD::VPERMI:
case X86ISD::VPPERM:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
case X86ISD::VZEXT_MOVL:
return true;
}
}
static bool isTargetShuffleVariableMask(unsigned Opcode) {
switch (Opcode) {
default: return false;
// Target Shuffles.
case X86ISD::PSHUFB:
case X86ISD::VPERMILPV:
case X86ISD::VPERMIL2:
case X86ISD::VPPERM:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
return true;
// 'Faux' Target Shuffles.
case ISD::AND:
case X86ISD::ANDNP:
return true;
}
}
SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
int ReturnAddrIndex = FuncInfo->getRAIndex();
if (ReturnAddrIndex == 0) {
// Set up a frame object for the return address.
unsigned SlotSize = RegInfo->getSlotSize();
ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
-(int64_t)SlotSize,
false);
FuncInfo->setRAIndex(ReturnAddrIndex);
}
return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
}
bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
bool hasSymbolicDisplacement) {
  // The offset should fit into a 32-bit immediate field.
if (!isInt<32>(Offset))
return false;
  // If we don't have a symbolic displacement, there are no extra
  // restrictions.
if (!hasSymbolicDisplacement)
return true;
// FIXME: Some tweaks might be needed for medium code model.
if (M != CodeModel::Small && M != CodeModel::Kernel)
return false;
  // For the small code model, we assume that the last object lies at least
  // 16MB below the 2GB (31-bit) boundary, so offsets up to 16MB are safe. We
  // may also accept fairly large negative offsets, since all objects lie in
  // the positive half of the address space.
if (M == CodeModel::Small && Offset < 16*1024*1024)
return true;
  // For the kernel code model we know that all objects reside in the negative
  // half of the 32-bit address space. We may not accept negative offsets,
  // since they might fall out of range, but we may accept fairly large
  // positive ones.
if (M == CodeModel::Kernel && Offset >= 0)
return true;
return false;
}
/// Determines whether the callee is required to pop its own arguments.
/// Callee pop is necessary to support tail calls.
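/// For example, on 32-bit x86 a stdcall callee pops its own stack arguments
/// with a 'ret imm16', so X86_StdCall returns true below when !is64Bit.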
bool X86::isCalleePop(CallingConv::ID CallingConv,
bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
// If GuaranteeTCO is true, we force some calls to be callee pop so that we
// can guarantee TCO.
if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
return true;
switch (CallingConv) {
default:
return false;
case CallingConv::X86_StdCall:
case CallingConv::X86_FastCall:
case CallingConv::X86_ThisCall:
case CallingConv::X86_VectorCall:
return !is64Bit;
}
}
/// Return true if the condition is an unsigned comparison operation.
static bool isX86CCUnsigned(unsigned X86CC) {
switch (X86CC) {
default:
llvm_unreachable("Invalid integer condition!");
case X86::COND_E:
case X86::COND_NE:
case X86::COND_B:
case X86::COND_A:
case X86::COND_BE:
case X86::COND_AE:
return true;
case X86::COND_G:
case X86::COND_GE:
case X86::COND_L:
case X86::COND_LE:
return false;
}
}
static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
switch (SetCCOpcode) {
default: llvm_unreachable("Invalid integer condition!");
case ISD::SETEQ: return X86::COND_E;
case ISD::SETGT: return X86::COND_G;
case ISD::SETGE: return X86::COND_GE;
case ISD::SETLT: return X86::COND_L;
case ISD::SETLE: return X86::COND_LE;
case ISD::SETNE: return X86::COND_NE;
case ISD::SETULT: return X86::COND_B;
case ISD::SETUGT: return X86::COND_A;
case ISD::SETULE: return X86::COND_BE;
case ISD::SETUGE: return X86::COND_AE;
}
}
/// Do a one-to-one translation of an ISD::CondCode to the X86-specific
/// condition code, returning the condition code and updating the LHS/RHS of
/// the comparison to make (both passed by reference).
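/// For example, an integer ISD::SETULT maps directly to X86::COND_B, while a
/// floating-point ISD::SETOLT swaps LHS/RHS below and then maps to
/// X86::COND_A.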
static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
bool isFP, SDValue &LHS, SDValue &RHS,
SelectionDAG &DAG) {
if (!isFP) {
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
        // X > -1 -> compare X with 0 and jump if the sign flag is clear.
RHS = DAG.getConstant(0, DL, RHS.getValueType());
return X86::COND_NS;
}
if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
        // X < 0 -> compare X with 0 and jump if the sign flag is set.
return X86::COND_S;
}
if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
// X < 1 -> X <= 0
RHS = DAG.getConstant(0, DL, RHS.getValueType());
return X86::COND_LE;
}
}
return TranslateIntegerX86CC(SetCCOpcode);
}
// First determine if it is required or is profitable to flip the operands.
// If LHS is a foldable load, but RHS is not, flip the condition.
if (ISD::isNON_EXTLoad(LHS.getNode()) &&
!ISD::isNON_EXTLoad(RHS.getNode())) {
SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
std::swap(LHS, RHS);
}
switch (SetCCOpcode) {
default: break;
case ISD::SETOLT:
case ISD::SETOLE:
case ISD::SETUGT:
case ISD::SETUGE:
std::swap(LHS, RHS);
break;
}
// On a floating point condition, the flags are set as follows:
// ZF PF CF op
// 0 | 0 | 0 | X > Y
// 0 | 0 | 1 | X < Y
// 1 | 0 | 0 | X == Y
// 1 | 1 | 1 | unordered
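  // For example, after the swaps above an ordered greater-than (SETOGT) uses
  // COND_A (CF==0 && ZF==0), which is correctly false for unordered inputs
  // where ZF, PF and CF are all set.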
switch (SetCCOpcode) {
default: llvm_unreachable("Condcode should be pre-legalized away");
case ISD::SETUEQ:
case ISD::SETEQ: return X86::COND_E;
case ISD::SETOLT: // flipped
case ISD::SETOGT:
case ISD::SETGT: return X86::COND_A;
case ISD::SETOLE: // flipped
case ISD::SETOGE:
case ISD::SETGE: return X86::COND_AE;
case ISD::SETUGT: // flipped
case ISD::SETULT:
case ISD::SETLT: return X86::COND_B;
case ISD::SETUGE: // flipped
case ISD::SETULE:
case ISD::SETLE: return X86::COND_BE;
case ISD::SETONE:
case ISD::SETNE: return X86::COND_NE;
case ISD::SETUO: return X86::COND_P;
case ISD::SETO: return X86::COND_NP;
case ISD::SETOEQ:
case ISD::SETUNE: return X86::COND_INVALID;
}
}
/// Is there a floating point cmov for the specific X86 condition code?
/// The current x86 ISA includes the following FP cmov instructions:
/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
static bool hasFPCMov(unsigned X86CC) {
switch (X86CC) {
default:
return false;
case X86::COND_B:
case X86::COND_BE:
case X86::COND_E:
case X86::COND_P:
case X86::COND_A:
case X86::COND_AE:
case X86::COND_NE:
case X86::COND_NP:
return true;
}
}
bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
const CallInst &I,
MachineFunction &MF,
unsigned Intrinsic) const {
const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
if (!IntrData)
return false;
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.flags = MachineMemOperand::MONone;
Info.offset = 0;
switch (IntrData->Type) {
case TRUNCATE_TO_MEM_VI8:
case TRUNCATE_TO_MEM_VI16:
case TRUNCATE_TO_MEM_VI32: {
Info.ptrVal = I.getArgOperand(0);
MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
ScalarVT = MVT::i8;
else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
ScalarVT = MVT::i16;
else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
ScalarVT = MVT::i32;
Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
Info.align = 1;
Info.flags |= MachineMemOperand::MOStore;
break;
}
default:
return false;
}
return true;
}
/// Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
return true;
}
return false;
}
bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
ISD::LoadExtType ExtTy,
EVT NewVT) const {
// "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
// relocation target a movq or addq instruction: don't let the load shrink.
SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
return true;
}
/// Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const {
assert(Ty->isIntegerTy());
unsigned BitSize = Ty->getPrimitiveSizeInBits();
if (BitSize == 0 || BitSize > 64)
return false;
return true;
}
bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
// TODO: It might be a win to ease or lift this restriction, but the generic
// folds in DAGCombiner conflict with vector folds for an AVX512 target.
if (VT.isVector() && Subtarget.hasAVX512())
return false;
return true;
}
bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
unsigned Index) const {
if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
return false;
  // Mask vectors support all subregister combinations and operations that
  // extract half of a vector.
if (ResVT.getVectorElementType() == MVT::i1)
return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
(Index == ResVT.getVectorNumElements()));
return (Index % ResVT.getVectorNumElements()) == 0;
}
bool X86TargetLowering::isCheapToSpeculateCttz() const {
// Speculate cttz only if we can directly use TZCNT.
return Subtarget.hasBMI();
}
bool X86TargetLowering::isCheapToSpeculateCtlz() const {
// Speculate ctlz only if we can directly use LZCNT.
return Subtarget.hasLZCNT();
}
bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT,
EVT BitcastVT) const {
if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1)
return false;
return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT);
}
bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
const SelectionDAG &DAG) const {
  // Do not merge to float value size (128 bits) if no implicit
  // float attribute is set.
bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
Attribute::NoImplicitFloat);
if (NoFloat) {
unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
return (MemVT.getSizeInBits() <= MaxIntSize);
}
return true;
}
bool X86TargetLowering::isCtlzFast() const {
return Subtarget.hasFastLZCNT();
}
bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
const Instruction &AndI) const {
return true;
}
bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
EVT VT = Y.getValueType();
if (VT.isVector())
return false;
if (!Subtarget.hasBMI())
return false;
// There are only 32-bit and 64-bit forms for 'andn'.
if (VT != MVT::i32 && VT != MVT::i64)
return false;
// A mask and compare against constant is ok for an 'andn' too
// even though the BMI instruction doesn't have an immediate form.
return true;
}
bool X86TargetLowering::hasAndNot(SDValue Y) const {
EVT VT = Y.getValueType();
if (!VT.isVector()) // x86 can't form 'andn' with an immediate.
return !isa<ConstantSDNode>(Y) && hasAndNotCompare(Y);
// Vector.
if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
return false;
if (VT == MVT::v4i32)
return true;
return Subtarget.hasSSE2();
}
bool X86TargetLowering::preferShiftsToClearExtremeBits(SDValue Y) const {
EVT VT = Y.getValueType();
// For vectors, we don't have a preference, but we probably want a mask.
if (VT.isVector())
return false;
// 64-bit shifts on 32-bit targets produce really bad bloated code.
if (VT == MVT::i64 && !Subtarget.is64Bit())
return false;
return true;
}
MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
MVT VT = MVT::getIntegerVT(NumBits);
if (isTypeLegal(VT))
return VT;
// PMOVMSKB can handle this.
if (NumBits == 128 && isTypeLegal(MVT::v16i8))
return MVT::v16i8;
// VPMOVMSKB can handle this.
if (NumBits == 256 && isTypeLegal(MVT::v32i8))
return MVT::v32i8;
// TODO: Allow 64-bit type for 32-bit target.
// TODO: 512-bit types should be allowed, but make sure that those
// cases are handled in combineVectorSizedSetCCEquality().
return MVT::INVALID_SIMPLE_VALUE_TYPE;
}
/// Val is the undef sentinel value or equal to the specified value.
static bool isUndefOrEqual(int Val, int CmpVal) {
return ((Val == SM_SentinelUndef) || (Val == CmpVal));
}
/// Val is either the undef or zero sentinel value.
static bool isUndefOrZero(int Val) {
return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
}
/// Return true if every element in Mask, beginning
/// at position Pos and ending at Pos + Size, is the undef sentinel value.
static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
if (Mask[i] != SM_SentinelUndef)
return false;
return true;
}
/// Return true if Val falls within the specified range [Low, Hi).
static bool isInRange(int Val, int Low, int Hi) {
return (Val >= Low && Val < Hi);
}
/// Return true if the value of any element in Mask falls within the specified
/// range [Low, Hi).
static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
for (int M : Mask)
if (isInRange(M, Low, Hi))
return true;
return false;
}
/// Return true if Val is undef or if its value falls within the
/// specified range [Low, Hi).
static bool isUndefOrInRange(int Val, int Low, int Hi) {
return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
}
/// Return true if every element in Mask is undef or if its value
/// falls within the specified range [Low, Hi).
static bool isUndefOrInRange(ArrayRef<int> Mask,
int Low, int Hi) {
for (int M : Mask)
if (!isUndefOrInRange(M, Low, Hi))
return false;
return true;
}
/// Return true if Val is undef, zero, or if its value falls within the
/// specified range [Low, Hi).
static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
}
/// Return true if every element in Mask is undef, zero, or if its value
/// falls within the specified range [Low, Hi).
static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
for (int M : Mask)
if (!isUndefOrZeroOrInRange(M, Low, Hi))
return false;
return true;
}
/// Return true if every element in Mask, beginning
/// at position Pos and ending at Pos + Size, falls within the specified
/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
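/// For example (Step == 1), Mask == <4, -1, 6, 7> matches Pos == 0,
/// Size == 4, Low == 4, since the undef element is accepted anywhere.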
static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
unsigned Size, int Low, int Step = 1) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
if (!isUndefOrEqual(Mask[i], Low))
return false;
return true;
}
/// Return true if every element in Mask, beginning
/// at position Pos and ending at Pos + Size, falls within the specified
/// sequential range [Low, Low + Size), or is undef or zero.
static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
unsigned Size, int Low) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low)
if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
return false;
return true;
}
/// Return true if every element in Mask, beginning
/// at position Pos and ending at Pos + Size, is undef or zero.
static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
unsigned Size) {
for (unsigned i = Pos, e = Pos + Size; i != e; ++i)
if (!isUndefOrZero(Mask[i]))
return false;
return true;
}
/// Helper function to test whether a shuffle mask could be
/// simplified by widening the elements being shuffled.
///
/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
/// leaves it in an unspecified state.
///
/// NOTE: This must handle normal vector shuffle masks and *target* vector
/// shuffle masks. The latter have the special property of a '-2' representing
/// a zero-ed lane of a vector.
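/// For example, the v4i32 mask <0, 1, 2, 3> widens to the v2i64 mask <0, 1>,
/// and <-1, 1, 6, 7> widens to <0, 3>, taking the value implied by the
/// defined half of each pair.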
static bool canWidenShuffleElements(ArrayRef<int> Mask,
SmallVectorImpl<int> &WidenedMask) {
WidenedMask.assign(Mask.size() / 2, 0);
for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
int M0 = Mask[i];
int M1 = Mask[i + 1];
    // If both elements are undef, it's trivial.
if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
WidenedMask[i / 2] = SM_SentinelUndef;
continue;
}
// Check for an undef mask and a mask value properly aligned to fit with
// a pair of values. If we find such a case, use the non-undef mask's value.
if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
WidenedMask[i / 2] = M1 / 2;
continue;
}
if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
WidenedMask[i / 2] = M0 / 2;
continue;
}
// When zeroing, we need to spread the zeroing across both lanes to widen.
if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
(M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
WidenedMask[i / 2] = SM_SentinelZero;
continue;
}
return false;
}
// Finally check if the two mask values are adjacent and aligned with
// a pair.
if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
WidenedMask[i / 2] = M0 / 2;
continue;
}
// Otherwise we can't safely widen the elements used in this shuffle.
return false;
}
assert(WidenedMask.size() == Mask.size() / 2 &&
"Incorrect size of mask after widening the elements!");
return true;
}
static bool canWidenShuffleElements(ArrayRef<int> Mask,
const APInt &Zeroable,
SmallVectorImpl<int> &WidenedMask) {
SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
if (TargetMask[i] == SM_SentinelUndef)
continue;
if (Zeroable[i])
TargetMask[i] = SM_SentinelZero;
}
return canWidenShuffleElements(TargetMask, WidenedMask);
}
static bool canWidenShuffleElements(ArrayRef<int> Mask) {
SmallVector<int, 32> WidenedMask;
return canWidenShuffleElements(Mask, WidenedMask);
}
/// Returns true if Elt is a constant zero or a floating point constant +0.0.
bool X86::isZeroNode(SDValue Elt) {
return isNullConstant(Elt) || isNullFPConstant(Elt);
}
// Build a vector of constants.
// Use an UNDEF node if MaskElt == -1.
// Split 64-bit constants in 32-bit mode.
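// For example (illustrative), in 32-bit mode a v2i64 mask constant is built
// as a v4i32 build_vector with a zero in each odd (upper) lane and then
// bitcast back to v2i64.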
static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
const SDLoc &dl, bool IsMask = false) {
SmallVector<SDValue, 32> Ops;
bool Split = false;
MVT ConstVecVT = VT;
unsigned NumElts = VT.getVectorNumElements();
bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
Split = true;
}
MVT EltVT = ConstVecVT.getVectorElementType();
for (unsigned i = 0; i < NumElts; ++i) {
bool IsUndef = Values[i] < 0 && IsMask;
SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
DAG.getConstant(Values[i], dl, EltVT);
Ops.push_back(OpNode);
if (Split)
Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
DAG.getConstant(0, dl, EltVT));
}
SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
if (Split)
ConstsNode = DAG.getBitcast(VT, ConstsNode);
return ConstsNode;
}
static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
assert(Bits.size() == Undefs.getBitWidth() &&
"Unequal constant and undef arrays");
SmallVector<SDValue, 32> Ops;
bool Split = false;
MVT ConstVecVT = VT;
unsigned NumElts = VT.getVectorNumElements();
bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
Split = true;
}
MVT EltVT = ConstVecVT.getVectorElementType();
for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
if (Undefs[i]) {
Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
continue;
}
const APInt &V = Bits[i];
assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
if (Split) {
Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
} else if (EltVT == MVT::f32) {
APFloat FV(APFloat::IEEEsingle(), V);
Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
} else if (EltVT == MVT::f64) {
APFloat FV(APFloat::IEEEdouble(), V);
Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
} else {
Ops.push_back(DAG.getConstant(V, dl, EltVT));
}
}
SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
return DAG.getBitcast(VT, ConstsNode);
}
/// Returns a vector of specified type with all zero elements.
static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
SelectionDAG &DAG, const SDLoc &dl) {
assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
VT.getVectorElementType() == MVT::i1) &&
"Unexpected vector type");
// Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
// type. This ensures they get CSE'd. But if the integer type is not
// available, use a floating-point +0.0 instead.
SDValue Vec;
if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
} else if (VT.getVectorElementType() == MVT::i1) {
assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
"Unexpected vector type");
Vec = DAG.getConstant(0, dl, VT);
} else {
unsigned Num32BitElts = VT.getSizeInBits() / 32;
Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
}
return DAG.getBitcast(VT, Vec);
}
static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
const SDLoc &dl, unsigned vectorWidth) {
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
unsigned Factor = VT.getSizeInBits()/vectorWidth;
EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
VT.getVectorNumElements()/Factor);
  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
  // This is the index of the first element of the vectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2, we just need to clear the
  // low bits.
IdxVal &= ~(ElemsPerChunk - 1);
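  // For example, with 32-bit elements and a 128-bit chunk (ElemsPerChunk == 4),
  // IdxVal == 5 rounds down to 4, the first element of the second chunk.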
// If the input is a buildvector just emit a smaller one.
if (Vec.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getBuildVector(ResultVT, dl,
Vec->ops().slice(IdxVal, ElemsPerChunk));
SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl) {
assert((Vec.getValueType().is256BitVector() ||
Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
return extractSubVector(Vec, IdxVal, DAG, dl, 128);
}
/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
return extractSubVector(Vec, IdxVal, DAG, dl, 256);
}
static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl,
unsigned vectorWidth) {
assert((vectorWidth == 128 || vectorWidth == 256) &&
"Unsupported vector width");
  // Inserting an undef subvector leaves Result unchanged.
if (Vec.isUndef())
return Result;
EVT VT = Vec.getValueType();
EVT ElVT = VT.getVectorElementType();
EVT ResultVT = Result.getValueType();
// Insert the relevant vectorWidth bits.
unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
  // This is the index of the first element of the vectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2, we just need to clear the
  // low bits.
IdxVal &= ~(ElemsPerChunk - 1);
SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
SelectionDAG &DAG, const SDLoc &dl) {
assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}
/// Widen a vector to a larger size with the same scalar type, with the new
/// elements either zero or undef.
static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
const SDLoc &dl) {
assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
Vec.getValueType().getScalarType() == VT.getScalarType() &&
"Unsupported vector widening type");
SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
: DAG.getUNDEF(VT);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
DAG.getIntPtrConstant(0, dl));
}
// Helper for splitting operands of an operation to legal target size and
// apply a function on each part.
// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
// The argument Builder is a function that will be applied on each split part:
// SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
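// For example (illustrative), a 512-bit operation on an AVX2 target without
// BWI is split into two 256-bit Builder calls whose results are concatenated
// with ISD::CONCAT_VECTORS.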
template <typename F>
SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
F Builder, bool CheckBWI = true) {
assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
unsigned NumSubs = 1;
if ((CheckBWI && Subtarget.useBWIRegs()) ||
(!CheckBWI && Subtarget.useAVX512Regs())) {
if (VT.getSizeInBits() > 512) {
NumSubs = VT.getSizeInBits() / 512;
assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
}
} else if (Subtarget.hasAVX2()) {
if (VT.getSizeInBits() > 256) {
NumSubs = VT.getSizeInBits() / 256;
assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
}
} else {
if (VT.getSizeInBits() > 128) {
NumSubs = VT.getSizeInBits() / 128;
assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
}
}
if (NumSubs == 1)
return Builder(DAG, DL, Ops);
SmallVector<SDValue, 4> Subs;
for (unsigned i = 0; i != NumSubs; ++i) {
SmallVector<SDValue, 2> SubOps;
for (SDValue Op : Ops) {
EVT OpVT = Op.getValueType();
unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
}
Subs.push_back(Builder(DAG, DL, SubOps));
}
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
}
// Return true if the instruction zeroes the unused upper part of the
// destination and accepts a mask.
static bool isMaskedZeroUpperBitsvXi1(unsigned int Opcode) {
switch (Opcode) {
default:
return false;
case X86ISD::CMPM:
case X86ISD::CMPM_RND:
case ISD::SETCC:
return true;
}
}
/// Insert i1-subvector to i1-vector.
static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
SDValue SubVec = Op.getOperand(1);
SDValue Idx = Op.getOperand(2);
if (!isa<ConstantSDNode>(Idx))
return SDValue();
// Inserting undef is a nop. We can just return the original vector.
if (SubVec.isUndef())
return Vec;
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
return Op;
MVT OpVT = Op.getSimpleValueType();
unsigned NumElems = OpVT.getVectorNumElements();
SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
// Extend to natively supported kshift.
MVT WideOpVT = OpVT;
if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
// Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
// if necessary.
if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
// May need to promote to a legal type.
Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
getZeroVector(WideOpVT, Subtarget, DAG, dl),
SubVec, Idx);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
MVT SubVecVT = SubVec.getSimpleValueType();
unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
assert(IdxVal + SubVecNumElems <= NumElems &&
IdxVal % SubVecVT.getSizeInBits() == 0 &&
"Unexpected index value in INSERT_SUBVECTOR");
SDValue Undef = DAG.getUNDEF(WideOpVT);
if (IdxVal == 0) {
    // Zero the lower bits of Vec.
SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
ZeroIdx);
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
// Merge them together, SubVec should be zero extended.
SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
getZeroVector(WideOpVT, Subtarget, DAG, dl),
SubVec, ZeroIdx);
Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
Undef, SubVec, ZeroIdx);
if (Vec.isUndef()) {
assert(IdxVal != 0 && "Unexpected index");
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
}
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
assert(IdxVal != 0 && "Unexpected index");
NumElems = WideOpVT.getVectorNumElements();
unsigned ShiftLeft = NumElems - SubVecNumElems;
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getConstant(ShiftLeft, dl, MVT::i8));
if (ShiftRight != 0)
SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
DAG.getConstant(ShiftRight, dl, MVT::i8));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
}
  // Simple case when we put the subvector in the upper part.
if (IdxVal + SubVecNumElems == NumElems) {
SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
DAG.getConstant(IdxVal, dl, MVT::i8));
if (SubVecNumElems * 2 == NumElems) {
      // Special case, use a legal zero-extending insert_subvector. This
      // allows isel to optimize when bits are known zero.
Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
getZeroVector(WideOpVT, Subtarget, DAG, dl),
Vec, ZeroIdx);
} else {
// Otherwise use explicit shifts to zero the bits.
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
Undef, Vec, ZeroIdx);
NumElems = WideOpVT.getVectorNumElements();
SDValue ShiftBits = DAG.getConstant(NumElems - IdxVal, dl, MVT::i8);
Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
}
Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
// Inserting into the middle is more complicated.
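  // The sequence below computes (old ^ new) in the low lanes, shifts that
  // difference up into position with zeros elsewhere, and xors it with the
  // original vector, so only lanes [IdxVal, IdxVal + SubVecNumElems) change.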
NumElems = WideOpVT.getVectorNumElements();
// Widen the vector if needed.
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
  // Move the current value of the bits to be replaced to the lsbs.
Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
DAG.getConstant(IdxVal, dl, MVT::i8));
  // Xor with the new bits.
Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Op, SubVec);
// Shift to MSB, filling bottom bits with 0.
unsigned ShiftLeft = NumElems - SubVecNumElems;
Op = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Op,
DAG.getConstant(ShiftLeft, dl, MVT::i8));
// Shift to the final position, filling upper bits with 0.
unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Op,
DAG.getConstant(ShiftRight, dl, MVT::i8));
  // Xor with the original vector, leaving the new value in place.
Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Vec, Op);
// Reduce to original width if needed.
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
static SDValue concatSubVectors(SDValue V1, SDValue V2, EVT VT,
unsigned NumElems, SelectionDAG &DAG,
const SDLoc &dl, unsigned VectorWidth) {
SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, VectorWidth);
return insertSubVector(V, V2, NumElems / 2, DAG, dl, VectorWidth);
}
/// Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
/// Then bitcast to their original type, ensuring they get CSE'd.
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
"Expected a 128/256/512-bit vector type");
APInt Ones = APInt::getAllOnesValue(32);
unsigned NumElts = VT.getSizeInBits() / 32;
SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
return DAG.getBitcast(VT, Vec);
}
static SDValue getExtendInVec(unsigned Opc, const SDLoc &DL, EVT VT, SDValue In,
SelectionDAG &DAG) {
EVT InVT = In.getValueType();
assert((X86ISD::VSEXT == Opc || X86ISD::VZEXT == Opc) && "Unexpected opcode");
if (VT.is128BitVector() && InVT.is128BitVector())
return X86ISD::VSEXT == Opc ? DAG.getSignExtendVectorInReg(In, DL, VT)
: DAG.getZeroExtendVectorInReg(In, DL, VT);
// For 256-bit vectors, we only need the lower (128-bit) input half.
// For 512-bit vectors, we only need the lower input half or quarter.
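  // For example, extending to a 512-bit v16i32 result only needs the low
  // 128 bits (v16i8) of a 512-bit input.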
if (VT.getSizeInBits() > 128 && InVT.getSizeInBits() > 128) {
int Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
In = extractSubVector(In, 0, DAG, DL,
std::max(128, (int)VT.getSizeInBits() / Scale));
}
return DAG.getNode(Opc, DL, VT, In);
}
/// Returns a vector_shuffle node for an unpackl operation.
static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
SDValue V1, SDValue V2) {
SmallVector<int, 8> Mask;
createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
/// Returns a vector_shuffle node for an unpackh operation.
static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
SDValue V1, SDValue V2) {
SmallVector<int, 8> Mask;
createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
/// Return a vector_shuffle of the specified vector of zero or undef vector.
/// This produces a shuffle where the low element of V2 is swizzled into the
/// zero/undef vector, landing at element Idx.
/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
bool IsZero,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = V2.getSimpleValueType();
SDValue V1 = IsZero
? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
int NumElems = VT.getVectorNumElements();
SmallVector<int, 16> MaskVec(NumElems);
for (int i = 0; i != NumElems; ++i)
// If this is the insertion idx, put the low elt of V2 here.
MaskVec[i] = (i == Idx) ? NumElems : i;
return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
}
static SDValue peekThroughBitcasts(SDValue V) {
while (V.getNode() && V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0);
return V;
}
static SDValue peekThroughOneUseBitcasts(SDValue V) {
while (V.getNode() && V.getOpcode() == ISD::BITCAST &&
V.getOperand(0).hasOneUse())
V = V.getOperand(0);
return V;
}
// Peek through EXTRACT_SUBVECTORs - typically used for AVX1 256-bit intops.
static SDValue peekThroughEXTRACT_SUBVECTORs(SDValue V) {
while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
V = V.getOperand(0);
return V;
}
static const Constant *getTargetConstantFromNode(SDValue Op) {
Op = peekThroughBitcasts(Op);
auto *Load = dyn_cast<LoadSDNode>(Op);
if (!Load)
return nullptr;
SDValue Ptr = Load->getBasePtr();
if (Ptr->getOpcode() == X86ISD::Wrapper ||
Ptr->getOpcode() == X86ISD::WrapperRIP)
Ptr = Ptr->getOperand(0);
auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
if (!CNode || CNode->isMachineConstantPoolEntry())
return nullptr;
return dyn_cast<Constant>(CNode->getConstVal());
}
// Extract raw constant bits from constant pools.
static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
APInt &UndefElts,
SmallVectorImpl<APInt> &EltBits,
bool AllowWholeUndefs = true,
bool AllowPartialUndefs = true) {
assert(EltBits.empty() && "Expected an empty EltBits vector");
Op = peekThroughBitcasts(Op);
EVT VT = Op.getValueType();
unsigned SizeInBits = VT.getSizeInBits();
assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
unsigned NumElts = SizeInBits / EltSizeInBits;
// Bitcast a source array of element bits to the target size.
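  // For example, two adjacent i32 source elements repack into the low and
  // high halves of one i64 target element, and an i64 source element splits
  // into four i16 target elements, preserving undef coverage bit-for-bit.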
auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
unsigned NumSrcElts = UndefSrcElts.getBitWidth();
unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
"Constant bit sizes don't match");
// Don't split if we don't allow undef bits.
bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
if (UndefSrcElts.getBoolValue() && !AllowUndefs)
return false;
// If we're already the right size, don't bother bitcasting.
if (NumSrcElts == NumElts) {
UndefElts = UndefSrcElts;
EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
return true;
}
// Extract all the undef/constant element data and pack into single bitsets.
APInt UndefBits(SizeInBits, 0);
APInt MaskBits(SizeInBits, 0);
for (unsigned i = 0; i != NumSrcElts; ++i) {
unsigned BitOffset = i * SrcEltSizeInBits;
if (UndefSrcElts[i])
UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
MaskBits.insertBits(SrcEltBits[i], BitOffset);
}
// Split the undef/constant single bitset data into the target elements.
UndefElts = APInt(NumElts, 0);
EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
for (unsigned i = 0; i != NumElts; ++i) {
unsigned BitOffset = i * EltSizeInBits;
APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
// Only treat an element as UNDEF if all bits are UNDEF.
if (UndefEltBits.isAllOnesValue()) {
if (!AllowWholeUndefs)
return false;
UndefElts.setBit(i);
continue;
}
// If only some bits are UNDEF then treat them as zero (or bail if not
// supported).
if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
return false;
APInt Bits = MaskBits.extractBits(EltSizeInBits, BitOffset);
EltBits[i] = Bits.getZExtValue();
}
return true;
};
// Collect constant bits and insert into mask/undef bit masks.
auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
unsigned UndefBitIndex) {
if (!Cst)
return false;
if (isa<UndefValue>(Cst)) {
Undefs.setBit(UndefBitIndex);
return true;
}
if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
Mask = CInt->getValue();
return true;
}
if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
Mask = CFP->getValueAPF().bitcastToAPInt();
return true;
}
return false;
};
// Handle UNDEFs.
if (Op.isUndef()) {
APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract scalar constant bits.
if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
APInt UndefSrcElts = APInt::getNullValue(1);
SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
return CastBitData(UndefSrcElts, SrcEltBits);
}
if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
APInt UndefSrcElts = APInt::getNullValue(1);
APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
SmallVector<APInt, 64> SrcEltBits(1, RawBits);
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract constant bits from build vector.
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
const SDValue &Src = Op.getOperand(i);
if (Src.isUndef()) {
UndefSrcElts.setBit(i);
continue;
}
auto *Cst = cast<ConstantSDNode>(Src);
SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
}
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract constant bits from constant pool vector.
if (auto *Cst = getTargetConstantFromNode(Op)) {
Type *CstTy = Cst->getType();
if (!CstTy->isVectorTy() || (SizeInBits != CstTy->getPrimitiveSizeInBits()))
return false;
unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
unsigned NumSrcElts = CstTy->getVectorNumElements();
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
for (unsigned i = 0; i != NumSrcElts; ++i)
if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
UndefSrcElts, i))
return false;
return CastBitData(UndefSrcElts, SrcEltBits);
}
// Extract constant bits from a broadcasted constant pool scalar.
if (Op.getOpcode() == X86ISD::VBROADCAST &&
EltSizeInBits <= VT.getScalarSizeInBits()) {
if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
if (UndefSrcElts[0])
UndefSrcElts.setBits(0, NumSrcElts);
SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
return CastBitData(UndefSrcElts, SrcEltBits);
}
}
}
// Extract a rematerialized scalar constant insertion.
if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
APInt UndefSrcElts(NumSrcElts, 0);
SmallVector<APInt, 64> SrcEltBits;
auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
return CastBitData(UndefSrcElts, SrcEltBits);
}
return false;
}
static bool getTargetShuffleMaskIndices(SDValue MaskNode,
unsigned MaskEltSizeInBits,
SmallVectorImpl<uint64_t> &RawMask) {
APInt UndefElts;
SmallVector<APInt, 64> EltBits;
// Extract the raw target constant bits.
// FIXME: We currently don't support UNDEF bits or mask entries.
if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
EltBits, /* AllowWholeUndefs */ false,
/* AllowPartialUndefs */ false))
return false;
// Insert the extracted elements into the mask.
for (APInt Elt : EltBits)
RawMask.push_back(Elt.getZExtValue());
return true;
}
/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
/// Note: This ignores saturation, so inputs must be checked first.
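/// For example, for v16i8 (two v8i16 inputs packed to bytes) the binary mask
/// is <0, 2, 4, ..., 14, 16, 18, ..., 30>: the even (low) byte of each word.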
static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
bool Unary) {
assert(Mask.empty() && "Expected an empty shuffle mask vector");
unsigned NumElts = VT.getVectorNumElements();
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
unsigned Offset = Unary ? 0 : NumElts;
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
Mask.push_back(Elt + (Lane * NumEltsPerLane));
for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
}
}
/// Calculates the shuffle mask corresponding to the target-specific opcode.
/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
/// operands in \p Ops, and returns true.
/// Sets \p IsUnary to true if only one source is used. Note that this will set
/// IsUnary for shuffles which use a single input multiple times, and in those
/// cases it will adjust the mask to only have indices within that single input.
/// It is an error to call this with non-empty Mask/Ops vectors.
static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
SmallVectorImpl<SDValue> &Ops,
SmallVectorImpl<int> &Mask, bool &IsUnary) {
unsigned NumElems = VT.getVectorNumElements();
SDValue ImmN;
assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
IsUnary = false;
bool IsFakeUnary = false;
switch(N->getOpcode()) {
case X86ISD::BLENDI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::SHUFP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodeSHUFPMask(NumElems, VT.getScalarSizeInBits(),
cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::INSERTPS:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::EXTRQI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
if (isa<ConstantSDNode>(N->getOperand(1)) &&
isa<ConstantSDNode>(N->getOperand(2))) {
int BitLen = N->getConstantOperandVal(1);
int BitIdx = N->getConstantOperandVal(2);
DecodeEXTRQIMask(NumElems, VT.getScalarSizeInBits(), BitLen, BitIdx,
Mask);
IsUnary = true;
}
break;
case X86ISD::INSERTQI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
if (isa<ConstantSDNode>(N->getOperand(2)) &&
isa<ConstantSDNode>(N->getOperand(3))) {
int BitLen = N->getConstantOperandVal(2);
int BitIdx = N->getConstantOperandVal(3);
DecodeINSERTQIMask(NumElems, VT.getScalarSizeInBits(), BitLen, BitIdx,
Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
}
break;
case X86ISD::UNPCKH:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeUNPCKHMask(NumElems, VT.getScalarSizeInBits(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::UNPCKL:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeUNPCKLMask(NumElems, VT.getScalarSizeInBits(), Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::MOVHLPS:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeMOVHLPSMask(NumElems, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::MOVLHPS:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeMOVLHPSMask(NumElems, Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::PALIGNR:
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
Ops.push_back(N->getOperand(1));
Ops.push_back(N->getOperand(0));
break;
case X86ISD::VSHLDQ:
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::VSRLDQ:
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands() - 1);
DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::PSHUFD:
case X86ISD::VPERMILPI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodePSHUFMask(NumElems, VT.getScalarSizeInBits(),
cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::PSHUFHW:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::PSHUFLW:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = true;
break;
case X86ISD::VZEXT_MOVL:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeZeroMoveLowMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::VBROADCAST: {
SDValue N0 = N->getOperand(0);
// See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
// add the pre-extracted value to the Ops vector.
if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
N0.getOperand(0).getValueType() == VT &&
N0.getConstantOperandVal(1) == 0)
Ops.push_back(N0.getOperand(0));
    // We only decode broadcasts of same-sized vectors, unless the broadcast
    // came from an extract from the original width. If we found one, we
    // pushed it to the Ops vector above.
if (N0.getValueType() == VT || !Ops.empty()) {
DecodeVectorBroadcast(NumElems, Mask);
IsUnary = true;
break;
}
return false;
}
case X86ISD::VPERMILPV: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
IsUnary = true;
SDValue MaskNode = N->getOperand(1);
unsigned MaskEltSize = VT.getScalarSizeInBits();
SmallVector<uint64_t, 32> RawMask;
if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
DecodeVPERMILPMask(NumElems, VT.getScalarSizeInBits(), RawMask, Mask);
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMILPMask(C, MaskEltSize, Mask);
break;
}
return false;
}
case X86ISD::PSHUFB: {
assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = true;
SDValue MaskNode = N->getOperand(1);
SmallVector<uint64_t, 32> RawMask;
if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
DecodePSHUFBMask(RawMask, Mask);
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodePSHUFBMask(C, Mask);
break;
}
return false;
}
case X86ISD::VPERMI:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
IsUnary = true;
break;
case X86ISD::MOVSS:
case X86ISD::MOVSD:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
break;
case X86ISD::VPERM2X128:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::SHUF128:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
ImmN = N->getOperand(N->getNumOperands()-1);
decodeVSHUF64x2FamilyMask(NumElems, VT.getScalarSizeInBits(),
cast<ConstantSDNode>(ImmN)->getZExtValue(),
Mask);
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
break;
case X86ISD::MOVSLDUP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeMOVSLDUPMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::MOVSHDUP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeMOVSHDUPMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::MOVDDUP:
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
DecodeMOVDDUPMask(NumElems, Mask);
IsUnary = true;
break;
case X86ISD::VPERMIL2: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
unsigned MaskEltSize = VT.getScalarSizeInBits();
SDValue MaskNode = N->getOperand(2);
SDValue CtrlNode = N->getOperand(3);
if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
unsigned CtrlImm = CtrlOp->getZExtValue();
SmallVector<uint64_t, 32> RawMask;
if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
DecodeVPERMIL2PMask(NumElems, VT.getScalarSizeInBits(), CtrlImm,
RawMask, Mask);
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMIL2PMask(C, CtrlImm, MaskEltSize, Mask);
break;
}
}
return false;
}
case X86ISD::VPPERM: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
SDValue MaskNode = N->getOperand(2);
SmallVector<uint64_t, 32> RawMask;
if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask)) {
DecodeVPPERMMask(RawMask, Mask);
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPPERMMask(C, Mask);
break;
}
return false;
}
case X86ISD::VPERMV: {
assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
IsUnary = true;
// Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
Ops.push_back(N->getOperand(1));
SDValue MaskNode = N->getOperand(0);
SmallVector<uint64_t, 32> RawMask;
unsigned MaskEltSize = VT.getScalarSizeInBits();
if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask)) {
DecodeVPERMVMask(RawMask, Mask);
break;
}
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMVMask(C, MaskEltSize, Mask);
break;
}
return false;
}
case X86ISD::VPERMV3: {
assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
// Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
Ops.push_back(N->getOperand(0));
Ops.push_back(N->getOperand(2));
SDValue MaskNode = N->getOperand(1);
unsigned MaskEltSize = VT.getScalarSizeInBits();
if (auto *C = getTargetConstantFromNode(MaskNode)) {
DecodeVPERMV3Mask(C, MaskEltSize, Mask);
break;
}
return false;
}
default: llvm_unreachable("unknown target shuffle node");
}
// Empty mask indicates the decode failed.
if (Mask.empty())
return false;
// Check if we're getting a shuffle mask with zeroed elements.
if (!AllowSentinelZero)
if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return false;
// If we have a fake unary shuffle, the shuffle mask is spread across two
// inputs that are actually the same node. Re-map the mask to always point
// into the first input.
if (IsFakeUnary)
for (int &M : Mask)
if (M >= (int)Mask.size())
M -= Mask.size();
// If we didn't already add operands in the opcode-specific code, default to
// adding 1 or 2 operands starting at 0.
if (Ops.empty()) {
Ops.push_back(N->getOperand(0));
if (!IsUnary || IsFakeUnary)
Ops.push_back(N->getOperand(1));
}
return true;
}
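// Worked example (hypothetical operands): a v4i64 X86ISD::VPERM2X128 with
// immediate 0x31 decodes as Mask = <2,3,6,7> (bits [1:0]=1 pick the high
// half of operand 0, bits [5:4]=3 pick the high half of operand 1). If both
// operands are the same node, IsFakeUnary is set and the remapping above
// folds the mask to <2,3,2,3> over a single input.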
/// Check a target shuffle mask's inputs to see if we can set any values to
/// SM_SentinelZero - this is for elements that are known to be zero
/// (not just zeroable) from their inputs.
/// Returns true if the target shuffle mask was decoded.
static bool setTargetShuffleZeroElements(SDValue N,
SmallVectorImpl<int> &Mask,
SmallVectorImpl<SDValue> &Ops) {
bool IsUnary;
if (!isTargetShuffle(N.getOpcode()))
return false;
MVT VT = N.getSimpleValueType();
if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
return false;
SDValue V1 = Ops[0];
SDValue V2 = IsUnary ? V1 : Ops[1];
V1 = peekThroughBitcasts(V1);
V2 = peekThroughBitcasts(V2);
assert((VT.getSizeInBits() % Mask.size()) == 0 &&
"Illegal split of shuffle value type");
unsigned EltSizeInBits = VT.getSizeInBits() / Mask.size();
// Extract known constant input data.
APInt UndefSrcElts[2];
SmallVector<APInt, 32> SrcEltBits[2];
bool IsSrcConstant[2] = {
getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
SrcEltBits[0], true, false),
getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
SrcEltBits[1], true, false)};
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
int M = Mask[i];
// Already decoded as SM_SentinelZero / SM_SentinelUndef.
if (M < 0)
continue;
// Determine shuffle input and normalize the mask.
unsigned SrcIdx = M / Size;
SDValue V = M < Size ? V1 : V2;
M %= Size;
// We are referencing an UNDEF input.
if (V.isUndef()) {
Mask[i] = SM_SentinelUndef;
continue;
}
// SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
// TODO: We currently only set UNDEF for integer types - floats use the same
// registers as vectors and many of the scalar folded loads rely on the
// SCALAR_TO_VECTOR pattern.
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
(Size % V.getValueType().getVectorNumElements()) == 0) {
int Scale = Size / V.getValueType().getVectorNumElements();
int Idx = M / Scale;
if (Idx != 0 && !VT.isFloatingPoint())
Mask[i] = SM_SentinelUndef;
else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
Mask[i] = SM_SentinelZero;
continue;
}
// Attempt to extract from the source's constant bits.
if (IsSrcConstant[SrcIdx]) {
if (UndefSrcElts[SrcIdx][M])
Mask[i] = SM_SentinelUndef;
else if (SrcEltBits[SrcIdx][M] == 0)
Mask[i] = SM_SentinelZero;
}
}
assert(VT.getVectorNumElements() == Mask.size() &&
"Different mask size from vector size!");
return true;
}
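// Worked example (hypothetical operands): a v4i32 X86ISD::UNPCKL decodes as
// Mask = <0,4,1,5>. If Ops[1] is the constant vector <0,undef,x,y>, the loop
// above rewrites the mask to <0,SM_SentinelZero,1,SM_SentinelUndef>, since
// element 4 reads a known-zero constant and element 5 reads an undef one.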
// Attempt to decode ops that could be represented as a shuffle mask.
// The decoded shuffle mask may contain a different number of elements than the
// destination value type.
static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
SmallVectorImpl<SDValue> &Ops,
const SelectionDAG &DAG) {
Mask.clear();
Ops.clear();
MVT VT = N.getSimpleValueType();
unsigned NumElts = VT.getVectorNumElements();
unsigned NumSizeInBits = VT.getSizeInBits();
unsigned NumBitsPerElt = VT.getScalarSizeInBits();
assert((NumBitsPerElt % 8) == 0 && (NumSizeInBits % 8) == 0 &&
"Expected byte aligned value types");
unsigned Opcode = N.getOpcode();
switch (Opcode) {
case ISD::VECTOR_SHUFFLE: {
// ISD::VECTOR_SHUFFLE is not treated as a target shuffle, so decode it here.
ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
Mask.append(ShuffleMask.begin(), ShuffleMask.end());
Ops.push_back(N.getOperand(0));
Ops.push_back(N.getOperand(1));
return true;
}
return false;
}
case ISD::AND:
case X86ISD::ANDNP: {
// Attempt to decode as a per-byte mask.
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
SDValue N0 = N.getOperand(0);
SDValue N1 = N.getOperand(1);
bool IsAndN = (X86ISD::ANDNP == Opcode);
uint64_t ZeroMask = IsAndN ? 255 : 0;
if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
return false;
for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
if (UndefElts[i]) {
Mask.push_back(SM_SentinelUndef);
continue;
}
uint64_t ByteBits = EltBits[i].getZExtValue();
if (ByteBits != 0 && ByteBits != 255)
return false;
Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
}
Ops.push_back(IsAndN ? N1 : N0);
return true;
}
case ISD::SCALAR_TO_VECTOR: {
// Match against a scalar_to_vector of an extract from a vector;
// for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
SDValue N0 = N.getOperand(0);
SDValue SrcExtract;
if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
N0.getOperand(0).getValueType() == VT) ||
(N0.getOpcode() == X86ISD::PEXTRW &&
N0.getOperand(0).getValueType() == MVT::v8i16) ||
(N0.getOpcode() == X86ISD::PEXTRB &&
N0.getOperand(0).getValueType() == MVT::v16i8)) {
SrcExtract = N0;
}
if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
return false;
SDValue SrcVec = SrcExtract.getOperand(0);
EVT SrcVT = SrcVec.getValueType();
unsigned NumSrcElts = SrcVT.getVectorNumElements();
unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
if (NumSrcElts <= SrcIdx)
return false;
Ops.push_back(SrcVec);
Mask.push_back(SrcIdx);
Mask.append(NumZeros, SM_SentinelZero);
Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
return true;
}
case X86ISD::PINSRB:
case X86ISD::PINSRW: {
SDValue InVec = N.getOperand(0);
SDValue InScl = N.getOperand(1);
SDValue InIndex = N.getOperand(2);
if (!isa<ConstantSDNode>(InIndex) ||
cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
return false;
uint64_t InIdx = N.getConstantOperandVal(2);
// Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
if (X86::isZeroNode(InScl)) {
Ops.push_back(InVec);
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
return true;
}
// Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
// TODO: Expand this to support INSERT_VECTOR_ELT/etc.
unsigned ExOp =
(X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
if (InScl.getOpcode() != ExOp)
return false;
SDValue ExVec = InScl.getOperand(0);
SDValue ExIndex = InScl.getOperand(1);
if (!isa<ConstantSDNode>(ExIndex) ||
cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
return false;
uint64_t ExIdx = InScl.getConstantOperandVal(1);
Ops.push_back(InVec);
Ops.push_back(ExVec);
for (unsigned i = 0; i != NumElts; ++i)
Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
return true;
}
case X86ISD::PACKSS:
case X86ISD::PACKUS: {
SDValue N0 = N.getOperand(0);
SDValue N1 = N.getOperand(1);
assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
"Unexpected input value type");
// If we know input saturation won't happen we can treat this
// as a truncation shuffle.
if (Opcode == X86ISD::PACKSS) {
if ((!N0.isUndef() && DAG.ComputeNumSignBits(N0) <= NumBitsPerElt) ||
(!N1.isUndef() && DAG.ComputeNumSignBits(N1) <= NumBitsPerElt))
return false;
} else {
APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
if ((!N0.isUndef() && !DAG.MaskedValueIsZero(N0, ZeroMask)) ||
(!N1.isUndef() && !DAG.MaskedValueIsZero(N1, ZeroMask)))
return false;
}
bool IsUnary = (N0 == N1);
Ops.push_back(N0);
if (!IsUnary)
Ops.push_back(N1);
createPackShuffleMask(VT, Mask, IsUnary);
return true;
}
case X86ISD::VSHLI:
case X86ISD::VSRLI: {
uint64_t ShiftVal = N.getConstantOperandVal(1);
// Out of range bit shifts are guaranteed to be zero.
if (NumBitsPerElt <= ShiftVal) {
Mask.append(NumElts, SM_SentinelZero);
return true;
}
// We can only decode 'whole byte' bit shifts as shuffles.
if ((ShiftVal % 8) != 0)
break;
uint64_t ByteShift = ShiftVal / 8;
unsigned NumBytes = NumSizeInBits / 8;
unsigned NumBytesPerElt = NumBitsPerElt / 8;
Ops.push_back(N.getOperand(0));
// Clear mask to all zeros and insert the shifted byte indices.
Mask.append(NumBytes, SM_SentinelZero);
if (X86ISD::VSHLI == Opcode) {
for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
Mask[i + j] = i + j - ByteShift;
} else {
for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
Mask[i + j - ByteShift] = i + j;
}
return true;
}
case ISD::ZERO_EXTEND_VECTOR_INREG:
case X86ISD::VZEXT: {
// TODO - add support for VPMOVZX with smaller input vector types.
SDValue Src = N.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
if (NumSizeInBits != SrcVT.getSizeInBits())
break;
DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), VT.getScalarSizeInBits(),
VT.getVectorNumElements(), Mask);
Ops.push_back(Src);
return true;
}
}
return false;
}
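// Worked example (hypothetical operands): a v2i64 X86ISD::VSRLI by 32 bits
// shifts each 8-byte element right by ByteShift = 4 bytes, producing the
// byte-level mask <4,5,6,7,Z,Z,Z,Z,12,13,14,15,Z,Z,Z,Z> (Z = SM_SentinelZero),
// which has more elements than the v2i64 destination type, as noted above.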
/// Removes unused shuffle source inputs and adjusts the shuffle mask accordingly.
static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
SmallVectorImpl<int> &Mask) {
int MaskWidth = Mask.size();
SmallVector<SDValue, 16> UsedInputs;
for (int i = 0, e = Inputs.size(); i < e; ++i) {
int lo = UsedInputs.size() * MaskWidth;
int hi = lo + MaskWidth;
// Strip UNDEF input usage.
if (Inputs[i].isUndef())
for (int &M : Mask)
if ((lo <= M) && (M < hi))
M = SM_SentinelUndef;
// Check for unused inputs.
if (any_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
UsedInputs.push_back(Inputs[i]);
continue;
}
for (int &M : Mask)
if (lo <= M)
M -= MaskWidth;
}
Inputs = UsedInputs;
}
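// Worked example (hypothetical inputs): with Inputs = {A,B}, MaskWidth = 4 and
// Mask = <4,5,6,7>, only B is referenced, so A is dropped and the mask is
// rebased to <0,1,2,3> over Inputs = {B}.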
/// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs
/// and set the SM_SentinelUndef and SM_SentinelZero values. Then check the
/// remaining input indices in case we now have a unary shuffle and adjust the
/// inputs accordingly.
/// Returns true if the target shuffle mask was decoded.
static bool resolveTargetShuffleInputs(SDValue Op,
SmallVectorImpl<SDValue> &Inputs,
SmallVectorImpl<int> &Mask,
const SelectionDAG &DAG) {
if (!setTargetShuffleZeroElements(Op, Mask, Inputs))
if (!getFauxShuffleMask(Op, Mask, Inputs, DAG))
return false;
resolveTargetShuffleInputsAndMask(Inputs, Mask);
return true;
}
/// Returns the scalar element that will make up the ith
/// element of the result of the vector shuffle.
static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
unsigned Depth) {
if (Depth == 6)
return SDValue(); // Limit search depth.
SDValue V = SDValue(N, 0);
EVT VT = V.getValueType();
unsigned Opcode = V.getOpcode();
// Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
int Elt = SV->getMaskElt(Index);
if (Elt < 0)
return DAG.getUNDEF(VT.getVectorElementType());
unsigned NumElems = VT.getVectorNumElements();
SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
: SV->getOperand(1);
return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
}
// Recurse into target specific vector shuffles to find scalars.
if (isTargetShuffle(Opcode)) {
MVT ShufVT = V.getSimpleValueType();
MVT ShufSVT = ShufVT.getVectorElementType();
int NumElems = (int)ShufVT.getVectorNumElements();
SmallVector<int, 16> ShuffleMask;
SmallVector<SDValue, 16> ShuffleOps;
bool IsUnary;
if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
return SDValue();
int Elt = ShuffleMask[Index];
if (Elt == SM_SentinelZero)
return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
: DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
if (Elt == SM_SentinelUndef)
return DAG.getUNDEF(ShufSVT);
assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
Depth+1);
}
// Actual nodes that may contain scalar elements
if (Opcode == ISD::BITCAST) {
V = V.getOperand(0);
EVT SrcVT = V.getValueType();
unsigned NumElems = VT.getVectorNumElements();
if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
return SDValue();
}
if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
return (Index == 0) ? V.getOperand(0)
: DAG.getUNDEF(VT.getVectorElementType());
if (V.getOpcode() == ISD::BUILD_VECTOR)
return V.getOperand(Index);
return SDValue();
}
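// Worked example (hypothetical operands): for vector_shuffle<4,1,6,3>(A,B)
// and Index = 2, the mask element is 6, so the search recurses into B at
// index 6 % 4 = 2; if B is a BUILD_VECTOR this returns B.getOperand(2).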
// Use PINSRB/PINSRW/PINSRD to create a build vector.
static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
unsigned NumElts = VT.getVectorNumElements();
assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
"Illegal vector insertion");
SDLoc dl(Op);
SDValue V;
bool First = true;
for (unsigned i = 0; i < NumElts; ++i) {
bool IsNonZero = (NonZeros & (1 << i)) != 0;
if (!IsNonZero)
continue;
// If the build vector contains zeros or our first insertion is not the
// first index, then insert into a zero vector to break any register
// dependency; else use SCALAR_TO_VECTOR/VZEXT_MOVL.
if (First) {
First = false;
if (NumZero || 0 != i)
V = getZeroVector(VT, Subtarget, DAG, dl);
else {
assert(0 == i && "Expected insertion into zero-index");
V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
V = DAG.getBitcast(VT, V);
continue;
}
}
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
DAG.getIntPtrConstant(i, dl));
}
return V;
}
/// Custom lower build_vector of v16i8.
static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (NumNonZero > 8 && !Subtarget.hasSSE41())
return SDValue();
// SSE4.1 - use PINSRB to insert each byte directly.
if (Subtarget.hasSSE41())
return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
Subtarget);
SDLoc dl(Op);
SDValue V;
bool First = true;
// Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
for (unsigned i = 0; i < 16; ++i) {
bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
if (ThisIsNonZero && First) {
if (NumZero)
V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
else
V = DAG.getUNDEF(MVT::v8i16);
First = false;
}
if ((i & 1) != 0) {
// FIXME: Investigate extending to i32 instead of just i16.
// FIXME: Investigate combining the first 4 bytes as a i32 instead.
SDValue ThisElt, LastElt;
bool LastIsNonZero = (NonZeros & (1 << (i - 1))) != 0;
if (LastIsNonZero) {
LastElt =
DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i - 1));
}
if (ThisIsNonZero) {
ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, ThisElt,
DAG.getConstant(8, dl, MVT::i8));
if (LastIsNonZero)
ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
} else
ThisElt = LastElt;
if (ThisElt) {
if (1 == i) {
V = NumZero ? DAG.getZExtOrTrunc(ThisElt, dl, MVT::i32)
: DAG.getAnyExtOrTrunc(ThisElt, dl, MVT::i32);
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
V = DAG.getBitcast(MVT::v8i16, V);
} else {
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
DAG.getIntPtrConstant(i / 2, dl));
}
}
}
}
return DAG.getBitcast(MVT::v16i8, V);
}
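// Worked example (hypothetical bytes): on the pre-SSE4.1 path, bytes 2 and 3
// of the build vector are zero-extended to i16 and merged as (b3 << 8) | b2,
// then inserted with PINSRW as word 1 (= 3/2) of the v8i16.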
/// Custom lower build_vector of v8i16.
static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
unsigned NumNonZero, unsigned NumZero,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (NumNonZero > 4 && !Subtarget.hasSSE41())
return SDValue();
// Use PINSRW to insert each element directly.
return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
Subtarget);
}
/// Custom lower build_vector of v4i32 or v4f32.
static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Find all zeroable elements.
std::bitset<4> Zeroable;
for (int i=0; i < 4; ++i) {
SDValue Elt = Op->getOperand(i);
Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
}
assert(Zeroable.size() - Zeroable.count() > 1 &&
"We expect at least two non-zero elements!");
// We only know how to deal with build_vector nodes where elements are either
// zeroable or extract_vector_elt with constant index.
SDValue FirstNonZero;
unsigned FirstNonZeroIdx;
for (unsigned i=0; i < 4; ++i) {
if (Zeroable[i])
continue;
SDValue Elt = Op->getOperand(i);
if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(Elt.getOperand(1)))
return SDValue();
// Make sure that this node is extracting from a 128-bit vector.
MVT VT = Elt.getOperand(0).getSimpleValueType();
if (!VT.is128BitVector())
return SDValue();
if (!FirstNonZero.getNode()) {
FirstNonZero = Elt;
FirstNonZeroIdx = i;
}
}
assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
SDValue V1 = FirstNonZero.getOperand(0);
MVT VT = V1.getSimpleValueType();
// See if this build_vector can be lowered as a blend with zero.
SDValue Elt;
unsigned EltMaskIdx, EltIdx;
int Mask[4];
for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
if (Zeroable[EltIdx]) {
// The zero vector will be on the right hand side.
Mask[EltIdx] = EltIdx+4;
continue;
}
Elt = Op->getOperand(EltIdx);
// By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
EltMaskIdx = Elt.getConstantOperandVal(1);
if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
break;
Mask[EltIdx] = EltIdx;
}
if (EltIdx == 4) {
// Let the shuffle legalizer deal with blend operations.
SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
if (V1.getSimpleValueType() != VT)
V1 = DAG.getBitcast(VT, V1);
return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask);
}
// See if we can lower this build_vector to an INSERTPS.
if (!Subtarget.hasSSE41())
return SDValue();
SDValue V2 = Elt.getOperand(0);
if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
V1 = SDValue();
bool CanFold = true;
for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
if (Zeroable[i])
continue;
SDValue Current = Op->getOperand(i);
SDValue SrcVector = Current->getOperand(0);
if (!V1.getNode())
V1 = SrcVector;
CanFold = (SrcVector == V1) && (Current.getConstantOperandVal(1) == i);
}
if (!CanFold)
return SDValue();
assert(V1.getNode() && "Expected at least two non-zero elements!");
if (V1.getSimpleValueType() != MVT::v4f32)
V1 = DAG.getBitcast(MVT::v4f32, V1);
if (V2.getSimpleValueType() != MVT::v4f32)
V2 = DAG.getBitcast(MVT::v4f32, V2);
// Ok, we can emit an INSERTPS instruction.
unsigned ZMask = Zeroable.to_ulong();
unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
SDLoc DL(Op);
SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
DAG.getIntPtrConstant(InsertPSMask, DL));
return DAG.getBitcast(VT, Result);
}
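// Worked example (hypothetical operands): inserting element 2 of V2 into
// element 1 of the result while zeroing element 3 gives
// InsertPSMask = (2 << 6) | (1 << 4) | 0b1000 = 0x98; the INSERTPS immediate
// encodes <source elt, dest elt, zero mask> in bits 7:6, 5:4 and 3:0.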
/// Return a vector logical shift node.
static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
SelectionDAG &DAG, const TargetLowering &TLI,
const SDLoc &dl) {
assert(VT.is128BitVector() && "Unknown type for VShift");
MVT ShVT = MVT::v16i8;
unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
SrcOp = DAG.getBitcast(ShVT, SrcOp);
assert(NumBits % 8 == 0 && "Only support byte sized shifts");
SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, MVT::i8);
return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
}
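// Worked example (hypothetical operands): getVShift(/*isLeft=*/true,
// MVT::v4i32, SrcOp, /*NumBits=*/32, ...) bitcasts SrcOp to v16i8 and emits
// X86ISD::VSHLDQ with a byte count of 4, i.e. a whole-register PSLLDQ, then
// bitcasts the result back to v4i32.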
static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
SelectionDAG &DAG) {
// Check if the scalar load can be widened into a vector load, and if the
// address is "base + cst", see whether the cst can be "absorbed" into
// the shuffle mask.
if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
SDValue Ptr = LD->getBasePtr();
if (!ISD::isNormalLoad(LD) || LD->isVolatile())
return SDValue();
EVT PVT = LD->getValueType(0);
if (PVT != MVT::i32 && PVT != MVT::f32)
return SDValue();
int FI = -1;
int64_t Offset = 0;
if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
FI = FINode->getIndex();
Offset = 0;
} else if (DAG.isBaseWithConstantOffset(Ptr) &&
isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
Offset = Ptr.getConstantOperandVal(1);
Ptr = Ptr.getOperand(0);
} else {
return SDValue();
}
// FIXME: 256-bit vector instructions don't require a strict alignment,
// improve this code to support it better.
unsigned RequiredAlign = VT.getSizeInBits()/8;
SDValue Chain = LD->getChain();
// Make sure the stack object alignment is at least 16 or 32.
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
if (MFI.isFixedObjectIndex(FI)) {
// Can't change the alignment. FIXME: If someone *really* cares, it is
// possible to compute the exact stack offset and reference FI + adjusted
// offset instead.
return SDValue();
} else {
MFI.setObjectAlignment(FI, RequiredAlign);
}
}
// (Offset % RequiredAlign) must be a multiple of 4. The address is then
// Ptr + (Offset & ~(RequiredAlign - 1)).
if (Offset < 0)
return SDValue();
if ((Offset % RequiredAlign) & 3)
return SDValue();
int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
if (StartOffset) {
SDLoc DL(Ptr);
Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
}
int EltNo = (Offset - StartOffset) >> 2;
unsigned NumElems = VT.getVectorNumElements();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
LD->getPointerInfo().getWithOffset(StartOffset));
SmallVector<int, 8> Mask(NumElems, EltNo);
return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
}
return SDValue();
}
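// Worked example (hypothetical values): for VT = v4i32 (RequiredAlign = 16)
// and a frame-index load at Offset = 20, StartOffset = 20 & ~15 = 16 and
// EltNo = (20 - 16) >> 2 = 1, so the scalar load is widened to a v4i32 load
// at Ptr + 16 and splatted with the mask <1,1,1,1>.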
/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
/// elements can be replaced by a single large load which has the same value as
/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
///
/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
const SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
bool isAfterLegalize) {
unsigned NumElems = Elts.size();
int LastLoadedElt = -1;
SmallBitVector LoadMask(NumElems, false);
SmallBitVector ZeroMask(NumElems, false);
SmallBitVector UndefMask(NumElems, false);
// For each element in the initializer, see if we've found a load, zero or an
// undef.
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = peekThroughBitcasts(Elts[i]);
if (!Elt.getNode())
return SDValue();
if (Elt.isUndef())
UndefMask[i] = true;
else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode()))
ZeroMask[i] = true;
else if (ISD::isNON_EXTLoad(Elt.getNode())) {
LoadMask[i] = true;
LastLoadedElt = i;
// Each loaded element must be the correct fractional portion of the
// requested vector load.
if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits())
return SDValue();
} else
return SDValue();
}
assert((ZeroMask | UndefMask | LoadMask).count() == NumElems &&
"Incomplete element masks");
// Handle Special Cases - all undef or undef/zero.
if (UndefMask.count() == NumElems)
return DAG.getUNDEF(VT);
// FIXME: Should we return this as a BUILD_VECTOR instead?
if ((ZeroMask | UndefMask).count() == NumElems)
return VT.isInteger() ? DAG.getConstant(0, DL, VT)
: DAG.getConstantFP(0.0, DL, VT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
int FirstLoadedElt = LoadMask.find_first();
SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
LoadSDNode *LDBase = cast<LoadSDNode>(EltBase);
EVT LDBaseVT = EltBase.getValueType();
// Consecutive loads can contain UNDEFs but not ZERO elements.
// Consecutive loads with UNDEF and ZERO elements require an
// additional shuffle stage to clear the ZERO elements.
bool IsConsecutiveLoad = true;
bool IsConsecutiveLoadWithZeros = true;
for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
if (LoadMask[i]) {
SDValue Elt = peekThroughBitcasts(Elts[i]);
LoadSDNode *LD = cast<LoadSDNode>(Elt);
if (!DAG.areNonVolatileConsecutiveLoads(
LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8,
i - FirstLoadedElt)) {
IsConsecutiveLoad = false;
IsConsecutiveLoadWithZeros = false;
break;
}
} else if (ZeroMask[i]) {
IsConsecutiveLoad = false;
}
}
SmallVector<LoadSDNode *, 8> Loads;
for (int i = FirstLoadedElt; i <= LastLoadedElt; ++i)
if (LoadMask[i])
Loads.push_back(cast<LoadSDNode>(peekThroughBitcasts(Elts[i])));
auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
auto MMOFlags = LDBase->getMemOperand()->getFlags();
assert(!(MMOFlags & MachineMemOperand::MOVolatile) &&
"Cannot merge volatile loads.");
SDValue NewLd =
DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
for (auto *LD : Loads)
DAG.makeEquivalentMemoryOrdering(LD, NewLd);
return NewLd;
};
// LOAD - all consecutive load/undefs (must start/end with a load).
// If we have found an entire vector of loads and undefs, then return a large
// load of the entire vector width starting at the base pointer.
// If the vector contains zeros, then attempt to shuffle those elements.
if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) &&
(IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
assert(LDBase && "Did not find base load for merging consecutive loads");
EVT EltVT = LDBase->getValueType(0);
// Ensure that the input vector size for the merged loads matches the
// cumulative size of the input elements.
if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
return SDValue();
if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
return SDValue();
// Don't create 256-bit non-temporal aligned loads without AVX2 as these
// will lower to regular temporal loads and use the cache.
if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
VT.is256BitVector() && !Subtarget.hasInt256())
return SDValue();
if (IsConsecutiveLoad)
return CreateLoad(VT, LDBase);
// IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
// vector and a zero vector to clear out the zero elements.
if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) {
SmallVector<int, 4> ClearMask(NumElems, -1);
for (unsigned i = 0; i < NumElems; ++i) {
if (ZeroMask[i])
ClearMask[i] = i + NumElems;
else if (LoadMask[i])
ClearMask[i] = i;
}
SDValue V = CreateLoad(VT, LDBase);
SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
: DAG.getConstantFP(0.0, DL, VT);
return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
}
}
int LoadSize =
(1 + LastLoadedElt - FirstLoadedElt) * LDBaseVT.getStoreSizeInBits();
// VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
(LoadSize == 32 || LoadSize == 64) &&
((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSize)
: MVT::getIntegerVT(LoadSize);
MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSize);
if (TLI.isTypeLegal(VecVT)) {
SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
SDValue ResNode =
DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
LDBase->getPointerInfo(),
LDBase->getAlignment(),
MachineMemOperand::MOLoad);
for (auto *LD : Loads)
DAG.makeEquivalentMemoryOrdering(LD, ResNode);
return DAG.getBitcast(VT, ResNode);
}
}
return SDValue();
}
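// Worked example (from the pattern in the comment above): for the v4i32
// elements <load i32 *a, load i32 *(a+4), zero, zero>, LoadSize = 2 * 32 = 64,
// so a v2i64 X86ISD::VZEXT_LOAD of the 64 bits at 'a' is emitted and bitcast
// back to v4i32, implicitly zeroing the upper elements.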
static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
unsigned SplatBitSize, LLVMContext &C) {
unsigned ScalarSize = VT.getScalarSizeInBits();
unsigned NumElm = SplatBitSize / ScalarSize;
SmallVector<Constant *, 32> ConstantVec;
for (unsigned i = 0; i < NumElm; i++) {
APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
Constant *Const;
if (VT.isFloatingPoint()) {
if (ScalarSize == 32) {
Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
} else {
assert(ScalarSize == 64 && "Unsupported floating point scalar size");
Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
}
} else
Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
ConstantVec.push_back(Const);
}
return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
}
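// Worked example (hypothetical values): for VT = v8i32 and a 64-bit
// SplatValue of 0x0000000100000002, NumElm = 2 and the result is the
// ConstantVector <i32 2, i32 1> (the low bits are extracted first).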
static bool isUseOfShuffle(SDNode *N) {
for (auto *U : N->uses()) {
if (isTargetShuffle(U->getOpcode()))
return true;
if (U->getOpcode() == ISD::BITCAST) // Ignore bitcasts
return isUseOfShuffle(U);
}
return false;
}
// Check if the current node of a build vector is a zero extended vector.
// If so, return the extended value.
// For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
// NumElt - return the number of zero extended identical values.
// EltType - return the type of the value including the zero extend.
static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
unsigned &NumElt, MVT &EltType) {
SDValue ExtValue = Op->getOperand(0);
unsigned NumElts = Op->getNumOperands();
unsigned Delta = NumElts;
for (unsigned i = 1; i < NumElts; i++) {
if (Op->getOperand(i) == ExtValue) {
Delta = i;
break;
}
if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
return SDValue();
}
if (!isPowerOf2_32(Delta) || Delta == 1)
return SDValue();
for (unsigned i = Delta; i < NumElts; i++) {
if (i % Delta == 0) {
if (Op->getOperand(i) != ExtValue)
return SDValue();
} else if (!(isNullConstant(Op->getOperand(i)) ||
Op->getOperand(i).isUndef()))
return SDValue();
}
unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
unsigned ExtVTSize = EltSize * Delta;
EltType = MVT::getIntegerVT(ExtVTSize);
NumElt = NumElts / Delta;
return ExtValue;
}
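// Worked example (hypothetical operands): for the v16i8 build vector
// (a,0,0,0,a,0,0,0,a,0,0,0,a,0,0,0), where operand 0 carries the repeated
// value, Delta = 4, so EltType = i32, NumElt = 4 and 'a' is returned.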
/// Attempt to use the vbroadcast instruction to generate a splat value
/// from a splat BUILD_VECTOR which uses:
/// a. A single scalar load, or a constant.
/// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
///
/// The VBROADCAST node is returned when a pattern is found,
/// or SDValue() otherwise.
static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// VBROADCAST requires AVX.
// TODO: Splats could be generated for non-AVX CPUs using SSE
// instructions, but there's less potential gain for only 128-bit vectors.
if (!Subtarget.hasAVX())
return SDValue();
MVT VT = BVOp->getSimpleValueType(0);
SDLoc dl(BVOp);
assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
"Unsupported vector type for broadcast.");
BitVector UndefElements;
SDValue Ld = BVOp->getSplatValue(&UndefElements);
// Attempt to use VBROADCASTM
// From this pattern:
// a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
// b. t1 = (build_vector t0 t0)
//
// Create (VBROADCASTM v2i1 X)
if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
MVT EltType = VT.getScalarType();
unsigned NumElts = VT.getVectorNumElements();
SDValue BOperand;
SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
(Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
if (ZeroExtended)
BOperand = ZeroExtended.getOperand(0);
else
BOperand = Ld.getOperand(0).getOperand(0);
MVT MaskVT = BOperand.getSimpleValueType();
if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
(EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
SDValue Brdcst =
DAG.getNode(X86ISD::VBROADCASTM, dl,
MVT::getVectorVT(EltType, NumElts), BOperand);
return DAG.getBitcast(VT, Brdcst);
}
}
}
// We need a splat of a single value to use broadcast, and it doesn't
// make any sense if the value is only in one element of the vector.
if (!Ld || (VT.getVectorNumElements() - UndefElements.count()) <= 1) {
APInt SplatValue, Undef;
unsigned SplatBitSize;
bool HasUndef;
// Check if this is a repeated constant pattern suitable for broadcasting.
if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
SplatBitSize > VT.getScalarSizeInBits() &&
SplatBitSize < VT.getSizeInBits()) {
// Avoid replacing with broadcast when it's a use of a shuffle
// instruction to preserve the present custom lowering of shuffles.
if (isUseOfShuffle(BVOp) || BVOp->hasOneUse())
return SDValue();
// Replace BUILD_VECTOR with a broadcast of the repeated constants.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
LLVMContext *Ctx = DAG.getContext();
MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
if (Subtarget.hasAVX()) {
if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
!(SplatBitSize == 64 && Subtarget.is32Bit())) {
// Splatted value can fit in one INTEGER constant in constant pool.
// Load the constant and broadcast it.
MVT CVT = MVT::getIntegerVT(SplatBitSize);
Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
SDValue CP = DAG.getConstantPool(C, PVT);
unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
MVT::getVectorVT(CVT, Repeat), Ld);
return DAG.getBitcast(VT, Brdcst);
} else if (SplatBitSize == 32 || SplatBitSize == 64) {
// Splatted value can fit in one FLOAT constant in the constant pool.
// Load the constant and broadcast it.
// AVX has support for 32- and 64-bit broadcasts for floats only.
// There is no 64-bit integer broadcast on a 32-bit subtarget.
MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
// Lower the splat via APFloat directly, to avoid any conversion.
Constant *C =
SplatBitSize == 32
? ConstantFP::get(*Ctx,
APFloat(APFloat::IEEEsingle(), SplatValue))
: ConstantFP::get(*Ctx,
APFloat(APFloat::IEEEdouble(), SplatValue));
SDValue CP = DAG.getConstantPool(C, PVT);
unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
MVT::getVectorVT(CVT, Repeat), Ld);
return DAG.getBitcast(VT, Brdcst);
} else if (SplatBitSize > 64) {
// Load the vector of constants and broadcast it.
MVT CVT = VT.getScalarType();
Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
*Ctx);
SDValue VCP = DAG.getConstantPool(VecC, PVT);
unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
Ld = DAG.getLoad(
MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
return DAG.getBitcast(VT, Brdcst);
}
}
}
return SDValue();
}
bool ConstSplatVal =
(Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
// Make sure that all of the users of a non-constant load are from the
// BUILD_VECTOR node.
if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
return SDValue();
unsigned ScalarSize = Ld.getValueSizeInBits();
bool IsGE256 = (VT.getSizeInBits() >= 256);
// When optimizing for size, generate up to 5 extra bytes for a broadcast
// instruction to save 8 or more bytes of constant pool data.
// TODO: If multiple splats are generated to load the same constant,
// it may be detrimental to overall size. There needs to be a way to detect
// that condition to know if this is truly a size win.
bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// Handle broadcasting a single constant scalar from the constant pool
// into a vector.
// On Sandybridge (no AVX2), it is still better to load a constant vector
// from the constant pool and not to broadcast it from a scalar.
// But override that restriction when optimizing for size.
// TODO: Check if splatting is recommended for other AVX-capable CPUs.
if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
EVT CVT = Ld.getValueType();
assert(!CVT.isVector() && "Must not broadcast a vector type");
// Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
// For size optimization, also splat v2f64 and v2i64, and for size opt
// with AVX2, also splat i8 and i16.
// With pattern matching, the VBROADCAST node may become a VMOVDDUP.
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
(OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
const Constant *C = nullptr;
if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
C = CI->getConstantIntValue();
else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
C = CF->getConstantFPValue();
assert(C && "Invalid constant type");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
SDValue CP =
DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
Ld = DAG.getLoad(
CVT, dl, DAG.getEntryNode(), CP,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
Alignment);
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
}
bool IsLoad = ISD::isNormalLoad(Ld.getNode());
// Handle AVX2 in-register broadcasts.
if (!IsLoad && Subtarget.hasInt256() &&
(ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The scalar source must be a normal load.
if (!IsLoad)
return SDValue();
if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
(Subtarget.hasVLX() && ScalarSize == 64))
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
// The integer check is needed for the 64-bit into 128-bit case, so it doesn't
// match double, since there is no vbroadcastsd xmm.
if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
}
// Unsupported broadcast.
return SDValue();
}
/// For an EXTRACT_VECTOR_ELT with a constant index return the real
/// underlying vector and index.
///
/// Modifies \p ExtractedFromVec to the real vector and returns the real
/// index.
static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
SDValue ExtIdx) {
int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
return Idx;
// For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
// lowered this:
// (extract_vector_elt (v8f32 %1), Constant<6>)
// to:
// (extract_vector_elt (vector_shuffle<2,u,u,u>
// (extract_subvector (v8f32 %0), Constant<4>),
// undef)
// Constant<0>)
// In this case the vector is the extract_subvector expression and the index
// is 2, as specified by the shuffle.
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
SDValue ShuffleVec = SVOp->getOperand(0);
MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
assert(ShuffleVecVT.getVectorElementType() ==
ExtractedFromVec.getSimpleValueType().getVectorElementType());
int ShuffleIdx = SVOp->getMaskElt(Idx);
if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
ExtractedFromVec = ShuffleVec;
return ShuffleIdx;
}
return Idx;
}
static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
// Skip if insert_vec_elt is not supported.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
return SDValue();
SDLoc DL(Op);
unsigned NumElems = Op.getNumOperands();
SDValue VecIn1;
SDValue VecIn2;
SmallVector<unsigned, 4> InsertIndices;
SmallVector<int, 8> Mask(NumElems, -1);
for (unsigned i = 0; i != NumElems; ++i) {
unsigned Opc = Op.getOperand(i).getOpcode();
if (Opc == ISD::UNDEF)
continue;
if (Opc != ISD::EXTRACT_VECTOR_ELT) {
// Quit if more than 1 element needs inserting.
if (InsertIndices.size() > 1)
return SDValue();
InsertIndices.push_back(i);
continue;
}
SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
SDValue ExtIdx = Op.getOperand(i).getOperand(1);
// Quit if non-constant index.
if (!isa<ConstantSDNode>(ExtIdx))
return SDValue();
int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
// Quit if extracted from vector of different type.
if (ExtractedFromVec.getValueType() != VT)
return SDValue();
if (!VecIn1.getNode())
VecIn1 = ExtractedFromVec;
else if (VecIn1 != ExtractedFromVec) {
if (!VecIn2.getNode())
VecIn2 = ExtractedFromVec;
else if (VecIn2 != ExtractedFromVec)
// Quit if there are more than 2 vectors to shuffle.
return SDValue();
}
if (ExtractedFromVec == VecIn1)
Mask[i] = Idx;
else if (ExtractedFromVec == VecIn2)
Mask[i] = Idx + NumElems;
}
if (!VecIn1.getNode())
return SDValue();
VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
for (unsigned Idx : InsertIndices)
NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
DAG.getIntPtrConstant(Idx, DL));
return NV;
}
static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
Op.getScalarValueSizeInBits() == 1 &&
"Can not convert non-constant vector");
uint64_t Immediate = 0;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
if (!In.isUndef())
Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
}
SDLoc dl(Op);
MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
return DAG.getConstant(Immediate, dl, VT);
}
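// Worked example (hypothetical operands): the v8i1 build vector
// <1,0,1,1,0,0,0,0> (operand 0 first) sets bits 0, 2 and 3, giving the i8
// constant 0b00001101 = 13.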
// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
assert((VT.getVectorElementType() == MVT::i1) &&
"Unexpected type in LowerBUILD_VECTORvXi1!");
SDLoc dl(Op);
if (ISD::isBuildVectorAllZeros(Op.getNode()))
return Op;
if (ISD::isBuildVectorAllOnes(Op.getNode()))
return Op;
if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
// Split the pieces.
SDValue Lower =
DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(0, 32));
SDValue Upper =
DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(32, 32));
// We have to manually lower both halves so getNode doesn't try to
// reassemble the build_vector.
Lower = LowerBUILD_VECTORvXi1(Lower, DAG, Subtarget);
Upper = LowerBUILD_VECTORvXi1(Upper, DAG, Subtarget);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lower, Upper);
}
SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
if (Imm.getValueSizeInBits() == VT.getSizeInBits())
return DAG.getBitcast(VT, Imm);
SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
DAG.getIntPtrConstant(0, dl));
}
// Vector has one or more non-const elements
uint64_t Immediate = 0;
SmallVector<unsigned, 16> NonConstIdx;
bool IsSplat = true;
bool HasConstElts = false;
int SplatIdx = -1;
for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
SDValue In = Op.getOperand(idx);
if (In.isUndef())
continue;
if (!isa<ConstantSDNode>(In))
NonConstIdx.push_back(idx);
else {
Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
HasConstElts = true;
}
if (SplatIdx < 0)
SplatIdx = idx;
else if (In != Op.getOperand(SplatIdx))
IsSplat = false;
}
// For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
if (IsSplat)
return DAG.getSelect(dl, VT, Op.getOperand(SplatIdx),
DAG.getConstant(1, dl, VT),
DAG.getConstant(0, dl, VT));
// insert elements one by one
SDValue DstVec;
SDValue Imm;
if (Immediate) {
MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8));
Imm = DAG.getConstant(Immediate, dl, ImmVT);
}
else if (HasConstElts)
Imm = DAG.getConstant(0, dl, VT);
else
Imm = DAG.getUNDEF(VT);
if (Imm.getValueSizeInBits() == VT.getSizeInBits())
DstVec = DAG.getBitcast(VT, Imm);
else {
SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm);
DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec,
DAG.getIntPtrConstant(0, dl));
}
for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
unsigned InsertIdx = NonConstIdx[i];
DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
Op.getOperand(InsertIdx),
DAG.getIntPtrConstant(InsertIdx, dl));
}
return DstVec;
}
/// Return true if \p N implements a horizontal binop and write the
/// operands of the horizontal binop to V0 and V1.
///
/// This is a helper function of LowerToHorizontalOp().
/// This function checks that the build_vector \p N in input implements a
/// horizontal operation. Parameter \p Opcode defines the kind of horizontal
/// operation to match.
/// For example, if \p Opcode is equal to ISD::ADD, then this function
/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
/// is equal to ISD::SUB, then this function checks if this is a horizontal
/// arithmetic sub.
///
/// This function only analyzes elements of \p N whose indices are
/// in range [BaseIdx, LastIdx).
static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
SelectionDAG &DAG,
unsigned BaseIdx, unsigned LastIdx,
SDValue &V0, SDValue &V1) {
EVT VT = N->getValueType(0);
assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
"Invalid Vector in input!");
bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
bool CanFold = true;
unsigned ExpectedVExtractIdx = BaseIdx;
unsigned NumElts = LastIdx - BaseIdx;
V0 = DAG.getUNDEF(VT);
V1 = DAG.getUNDEF(VT);
// Check if N implements a horizontal binop.
for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
SDValue Op = N->getOperand(i + BaseIdx);
// Skip UNDEFs.
if (Op->isUndef()) {
// Update the expected vector extract index.
if (i * 2 == NumElts)
ExpectedVExtractIdx = BaseIdx;
ExpectedVExtractIdx += 2;
continue;
}
CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
if (!CanFold)
break;
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
// Try to match the following pattern:
// (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
Op0.getOperand(0) == Op1.getOperand(0) &&
isa<ConstantSDNode>(Op0.getOperand(1)) &&
isa<ConstantSDNode>(Op1.getOperand(1)));
if (!CanFold)
break;
unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
if (i * 2 < NumElts) {
if (V0.isUndef()) {
V0 = Op0.getOperand(0);
if (V0.getValueType() != VT)
return false;
}
} else {
if (V1.isUndef()) {
V1 = Op0.getOperand(0);
if (V1.getValueType() != VT)
return false;
}
if (i * 2 == NumElts)
ExpectedVExtractIdx = BaseIdx;
}
SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
if (I0 == ExpectedVExtractIdx)
CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
else if (IsCommutable && I1 == ExpectedVExtractIdx) {
// Try to match the following dag sequence:
// (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
} else
CanFold = false;
ExpectedVExtractIdx += 2;
}
return CanFold;
}
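// Worked example (hypothetical operands): the v4f32 build vector
// (fadd (extract A,0),(extract A,1)), (fadd (extract A,2),(extract A,3)),
// (fadd (extract B,0),(extract B,1)), (fadd (extract B,2),(extract B,3))
// matches with V0 = A and V1 = B, which the caller can lower to
// HADDPS A, B = (A0+A1, A2+A3, B0+B1, B2+B3).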
/// Emit a sequence of two 128-bit horizontal add/sub followed by
/// a concat_vector.
///
/// This is a helper function of LowerToHorizontalOp().
/// This function expects two 256-bit vectors called V0 and V1.
/// At first, each vector is split into two separate 128-bit vectors.
/// Then, the resulting 128-bit vectors are used to implement two
/// horizontal binary operations.
///
/// The kind of horizontal binary operation is defined by \p X86Opcode.
///
/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs
/// to the two new horizontal binops.
/// When Mode is set, the first horizontal binop dag node takes as input
/// the lower 128 bits of V0 and the upper 128 bits of V0. The second
/// horizontal binop dag node takes as input the lower 128 bits of V1
/// and the upper 128 bits of V1.
/// Example:
/// HADD V0_LO, V0_HI
/// HADD V1_LO, V1_HI
///
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal
/// binop dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
/// Example:
/// HADD V0_LO, V1_LO
/// HADD V0_HI, V1_HI
///
/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
const SDLoc &DL, SelectionDAG &DAG,
unsigned X86Opcode, bool Mode,
bool isUndefLO, bool isUndefHI) {
MVT VT = V0.getSimpleValueType();
assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
"Invalid nodes in input!");
unsigned NumElts = VT.getVectorNumElements();
SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
MVT NewVT = V0_LO.getSimpleValueType();
SDValue LO = DAG.getUNDEF(NewVT);
SDValue HI = DAG.getUNDEF(NewVT);
if (Mode) {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
if (!isUndefLO && !V0->isUndef())
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
if (!isUndefHI && !V1->isUndef())
HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
} else {
// Don't emit a horizontal binop if the result is expected to be UNDEF.
if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
}
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
}
/// Returns true iff \p BV builds a vector with a result equivalent to
/// the result of an ADDSUB/SUBADD operation.
/// If true is returned, then the operands of the ADDSUB = Opnd0 +- Opnd1
/// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
/// \p Opnd0 and \p Opnd1.
static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
SDValue &Opnd0, SDValue &Opnd1,
unsigned &NumExtracts,
bool &IsSubAdd) {
MVT VT = BV->getSimpleValueType(0);
if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
return false;
unsigned NumElts = VT.getVectorNumElements();
SDValue InVec0 = DAG.getUNDEF(VT);
SDValue InVec1 = DAG.getUNDEF(VT);
NumExtracts = 0;
// Odd-numbered elements in the input build vector are obtained from
// adding/subtracting two integer/float elements.
// Even-numbered elements in the input build vector are obtained from
// subtracting/adding two integer/float elements.
unsigned Opc[2] {0, 0};
for (unsigned i = 0, e = NumElts; i != e; ++i) {
SDValue Op = BV->getOperand(i);
// Skip 'undef' values.
unsigned Opcode = Op.getOpcode();
if (Opcode == ISD::UNDEF)
continue;
// Early exit if we found an unexpected opcode.
if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
return false;
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
// Try to match the following pattern:
// (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
// Early exit if we cannot match that sequence.
if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
!isa<ConstantSDNode>(Op0.getOperand(1)) ||
!isa<ConstantSDNode>(Op1.getOperand(1)) ||
Op0.getOperand(1) != Op1.getOperand(1))
return false;
unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
if (I0 != i)
return false;
// We found a valid add/sub node; make sure it's the same opcode as previous
// elements for this parity.
if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
return false;
Opc[i % 2] = Opcode;
// Update InVec0 and InVec1.
if (InVec0.isUndef()) {
InVec0 = Op0.getOperand(0);
if (InVec0.getSimpleValueType() != VT)
return false;
}
if (InVec1.isUndef()) {
InVec1 = Op1.getOperand(0);
if (InVec1.getSimpleValueType() != VT)
return false;
}
// Make sure that the input operands to each add/sub node always
// come from the same pair of vectors.
if (InVec0 != Op0.getOperand(0)) {
if (Opcode == ISD::FSUB)
return false;
// FADD is commutable. Try to commute the operands
// and then test again.
std::swap(Op0, Op1);
if (InVec0 != Op0.getOperand(0))
return false;
}
if (InVec1 != Op1.getOperand(0))
return false;
// Increment the number of extractions done.
++NumExtracts;
}
// Ensure we have found an opcode for both parities and that they are
// different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
// inputs are undef.
if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
InVec0.isUndef() || InVec1.isUndef())
return false;
IsSubAdd = Opc[0] == ISD::FADD;
Opnd0 = InVec0;
Opnd1 = InVec1;
return true;
}
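// Worked example (hypothetical operands): the v2f64 build vector
// (fsub (extract A,0),(extract B,0)), (fadd (extract A,1),(extract B,1))
// matches with Opnd0 = A, Opnd1 = B and IsSubAdd = false, i.e. the
// X86ISD::ADDSUB pattern (A0-B0, A1+B1).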
/// Returns true if it is possible to fold MUL and an idiom that has already
/// been recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
/// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
///
/// Prior to calling this function it should be known that there is some
/// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
/// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
/// before replacement of such SDNode with ADDSUB operation. Thus the number
/// of \p Opnd0 uses is expected to be equal to 2.
/// For example, this function may be called for the following IR:
/// %AB = fmul fast <2 x double> %A, %B
/// %Sub = fsub fast <2 x double> %AB, %C
/// %Add = fadd fast <2 x double> %AB, %C
/// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
/// <2 x i32> <i32 0, i32 3>
/// There is a def for %Addsub here, which potentially can be replaced by
/// X86ISD::ADDSUB operation:
/// %Addsub = X86ISD::ADDSUB %AB, %C
/// and such ADDSUB can further be replaced with FMADDSUB:
/// %Addsub = FMADDSUB %A, %B, %C.
///
/// The main reason why this method is called before the replacement of the
/// recognized ADDSUB idiom with ADDSUB operation is that such replacement
/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
/// FMADDSUB is.
static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
SelectionDAG &DAG,
SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
unsigned ExpectedUses) {
if (Opnd0.getOpcode() != ISD::FMUL ||
!Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
return false;
// FIXME: These checks must match the similar ones in
// DAGCombiner::visitFADDForFMACombine. It would be good to have one
// function that would answer if it is Ok to fuse MUL + ADD to FMADD
// or MUL + ADDSUB to FMADDSUB.
const TargetOptions &Options = DAG.getTarget().Options;
bool AllowFusion =
(Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
if (!AllowFusion)
return false;
Opnd2 = Opnd1;
Opnd1 = Opnd0.getOperand(1);
Opnd0 = Opnd0.getOperand(0);
return true;
}
/// Try to fold a build_vector that performs an 'addsub' or 'fmaddsub' or
/// 'fsubadd' operation accordingly to X86ISD::ADDSUB or X86ISD::FMADDSUB or
/// X86ISD::FMSUBADD node.
static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Opnd0, Opnd1;
unsigned NumExtracts;
bool IsSubAdd;
if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
IsSubAdd))
return SDValue();
MVT VT = BV->getSimpleValueType(0);
SDLoc DL(BV);
// Try to generate X86ISD::FMADDSUB node here.
SDValue Opnd2;
if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
}
// We only support ADDSUB.
if (IsSubAdd)
return SDValue();
// Do not generate X86ISD::ADDSUB node for 512-bit types even though
// the ADDSUB idiom has been successfully recognized. There are no known
// X86 targets with 512-bit ADDSUB instructions!
// 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
// recognition.
if (VT.is512BitVector())
return SDValue();
return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
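/// For example, for v4f32 this matches a build_vector of the form
///   (build_vector (fadd (extract V0, 0), (extract V0, 1)),
///                 (fadd (extract V0, 2), (extract V0, 3)),
///                 (fadd (extract V1, 0), (extract V1, 1)),
///                 (fadd (extract V1, 2), (extract V1, 3)))
/// and lowers it to (X86ISD::FHADD V0, V1), mirroring HADDPS semantics.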
static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = BV->getSimpleValueType(0);
unsigned NumElts = VT.getVectorNumElements();
unsigned NumUndefsLO = 0;
unsigned NumUndefsHI = 0;
unsigned Half = NumElts/2;
// Count the number of UNDEF operands in the input build_vector.
for (unsigned i = 0, e = Half; i != e; ++i)
if (BV->getOperand(i)->isUndef())
NumUndefsLO++;
for (unsigned i = Half, e = NumElts; i != e; ++i)
if (BV->getOperand(i)->isUndef())
NumUndefsHI++;
// Early exit if this is either a build_vector of all UNDEFs or all the
// operands but one are UNDEF.
if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
return SDValue();
SDLoc DL(BV);
SDValue InVec0, InVec1;
if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) {
// Try to match an SSE3 float HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
} else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) {
// Try to match an SSSE3 integer HADD/HSUB.
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
}
if (!Subtarget.hasAVX())
return SDValue();
if (VT == MVT::v8f32 || VT == MVT::v4f64) {
// Try to match an AVX horizontal add/sub of packed single/double
// precision floating point values from 256-bit vectors.
SDValue InVec2, InVec3;
if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
} else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
// Try to match an AVX2 horizontal add/sub of signed integers.
SDValue InVec2, InVec3;
unsigned X86Opcode;
bool CanFold = true;
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HADD;
else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
X86Opcode = X86ISD::HSUB;
else
CanFold = false;
if (CanFold) {
// Fold this build_vector into a single horizontal add/sub.
// Do this only if the target has AVX2.
if (Subtarget.hasAVX2())
return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
// Do not try to expand this build_vector into a pair of horizontal
// add/sub if we can emit a pair of scalar add/sub.
if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
return SDValue();
// Convert this build_vector into a pair of horizontal binop followed by
// a concat vector.
bool isUndefLO = NumUndefsLO == Half;
bool isUndefHI = NumUndefsHI == Half;
return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
isUndefLO, isUndefHI);
}
}
if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
VT == MVT::v16i16) && Subtarget.hasAVX()) {
unsigned X86Opcode;
if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
X86Opcode = X86ISD::HADD;
else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
X86Opcode = X86ISD::HSUB;
else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
X86Opcode = X86ISD::FHADD;
else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
X86Opcode = X86ISD::FHSUB;
else
return SDValue();
// Don't try to expand this build_vector into a pair of horizontal add/sub
// if we can simply emit a pair of scalar add/sub.
if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
return SDValue();
// Convert this build_vector into two horizontal add/sub followed by
// a concat vector.
bool isUndefLO = NumUndefsLO == Half;
bool isUndefHI = NumUndefsHI == Half;
return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
isUndefLO, isUndefHI);
}
return SDValue();
}
/// If a BUILD_VECTOR's source elements all apply the same bit operation and
/// one of their operands is constant, lower to a pair of BUILD_VECTOR and
/// just apply the bit to the vectors.
/// NOTE: It's not in our interest to turn this into a general purpose
/// vectorizer, but enough scalar bit operations are created by the later
/// legalization + scalarization stages to need basic support.
static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
SelectionDAG &DAG) {
SDLoc DL(Op);
MVT VT = Op->getSimpleValueType(0);
unsigned NumElems = VT.getVectorNumElements();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Check that all elements have the same opcode.
// TODO: Should we allow UNDEFS and if so how many?
unsigned Opcode = Op->getOperand(0).getOpcode();
for (unsigned i = 1; i < NumElems; ++i)
if (Opcode != Op->getOperand(i).getOpcode())
return SDValue();
// TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
switch (Opcode) {
default:
return SDValue();
case ISD::AND:
case ISD::XOR:
case ISD::OR:
// Don't do this if the buildvector is a splat - we'd replace one
// constant with an entire vector.
if (Op->getSplatValue())
return SDValue();
if (!TLI.isOperationLegalOrPromote(Opcode, VT))
return SDValue();
break;
}
SmallVector<SDValue, 4> LHSElts, RHSElts;
for (SDValue Elt : Op->ops()) {
SDValue LHS = Elt.getOperand(0);
SDValue RHS = Elt.getOperand(1);
// We expect the canonicalized RHS operand to be the constant.
if (!isa<ConstantSDNode>(RHS))
return SDValue();
LHSElts.push_back(LHS);
RHSElts.push_back(RHS);
}
SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
return DAG.getNode(Opcode, DL, VT, LHS, RHS);
}
/// Create a vector constant without a load. SSE/AVX provide the bare minimum
/// functionality to do this, so it's all zeros, all ones, or some derivation
/// that is cheap to calculate.
static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
// Vectors containing all zeros can be matched by pxor and xorps.
if (ISD::isBuildVectorAllZeros(Op.getNode())) {
// Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
// and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
return Op;
return getZeroVector(VT, Subtarget, DAG, DL);
}
// Vectors containing all ones can be matched by pcmpeqd on 128-bit width
// vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
// vpcmpeqd on 256-bit vectors.
if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
if (VT == MVT::v4i32 || VT == MVT::v16i32 ||
(VT == MVT::v8i32 && Subtarget.hasInt256()))
return Op;
return getOnesVector(VT, DAG, DL);
}
return SDValue();
}
/// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
/// from a vector of source values and a vector of extraction indices.
/// The vectors might be manipulated to match the type of the permute op.
static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT ShuffleVT = VT;
EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
unsigned NumElts = VT.getVectorNumElements();
unsigned SizeInBits = VT.getSizeInBits();
// Adjust IndicesVec to match VT size.
assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
"Illegal variable permute mask size");
if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
NumElts * VT.getScalarSizeInBits());
IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
// Handle a SrcVec whose type doesn't match VT.
if (SrcVec.getValueSizeInBits() != SizeInBits) {
if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
// Handle larger SrcVec by treating it as a larger permute.
unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
Subtarget, DAG, SDLoc(IndicesVec));
return extractSubVector(
createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
DAG, DL, SizeInBits);
} else if (SrcVec.getValueSizeInBits() < SizeInBits) {
// Widen smaller SrcVec to match VT.
SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
} else
return SDValue();
}
auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
EVT SrcVT = Idx.getValueType();
unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
uint64_t IndexScale = 0;
uint64_t IndexOffset = 0;
// If we're scaling a smaller permute op, then we need to repeat the
// indices, scaling and offsetting them as well.
// e.g. v4i32 -> v16i8 (Scale = 4)
// IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
// IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
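// e.g. a v4i32 index element holding 2 becomes 2 * 0x04040404 + 0x03020100
// = 0x0B0A0908, i.e. byte indices <8,9,10,11> once bitcast to v16i8.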
for (uint64_t i = 0; i != Scale; ++i) {
IndexScale |= Scale << (i * NumDstBits);
IndexOffset |= i << (i * NumDstBits);
}
Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
return Idx;
};
unsigned Opcode = 0;
switch (VT.SimpleTy) {
default:
break;
case MVT::v16i8:
if (Subtarget.hasSSSE3())
Opcode = X86ISD::PSHUFB;
break;
case MVT::v8i16:
if (Subtarget.hasVLX() && Subtarget.hasBWI())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasSSSE3()) {
Opcode = X86ISD::PSHUFB;
ShuffleVT = MVT::v16i8;
}
break;
case MVT::v4f32:
case MVT::v4i32:
if (Subtarget.hasAVX()) {
Opcode = X86ISD::VPERMILPV;
ShuffleVT = MVT::v4f32;
} else if (Subtarget.hasSSSE3()) {
Opcode = X86ISD::PSHUFB;
ShuffleVT = MVT::v16i8;
}
break;
case MVT::v2f64:
case MVT::v2i64:
if (Subtarget.hasAVX()) {
// VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
Opcode = X86ISD::VPERMILPV;
ShuffleVT = MVT::v2f64;
} else if (Subtarget.hasSSE41()) {
// SSE41 can compare v2i64 - select between indices 0 and 1.
return DAG.getSelectCC(
DL, IndicesVec,
getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
ISD::CondCode::SETEQ);
}
break;
case MVT::v32i8:
if (Subtarget.hasVLX() && Subtarget.hasVBMI())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasXOP()) {
SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
return DAG.getNode(
ISD::CONCAT_VECTORS, DL, VT,
DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
} else if (Subtarget.hasAVX()) {
SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Permute Lo and Hi and then select based on index range.
// This works as PSHUFB uses bits[3:0] to permute elements and we don't
// care about bit[7] as it's just an index vector.
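// e.g. for index 17 the SETGT select below picks the HiHi permute, and
// PSHUFB's bits[3:0] = 1 select byte 1 of the high half, i.e. byte 17 of
// the original source vector.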
SDValue Idx = Ops[2];
EVT VT = Idx.getValueType();
return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
ISD::CondCode::SETGT);
};
SDValue Ops[] = {LoLo, HiHi, IndicesVec};
return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
PSHUFBBuilder);
}
break;
case MVT::v16i16:
if (Subtarget.hasVLX() && Subtarget.hasBWI())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasAVX()) {
// Scale to v32i8 and perform as v32i8.
IndicesVec = ScaleIndices(IndicesVec, 2);
return DAG.getBitcast(
VT, createVariablePermute(
MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
}
break;
case MVT::v8f32:
case MVT::v8i32:
if (Subtarget.hasAVX2())
Opcode = X86ISD::VPERMV;
else if (Subtarget.hasAVX()) {
SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
{0, 1, 2, 3, 0, 1, 2, 3});
SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
{4, 5, 6, 7, 4, 5, 6, 7});
if (Subtarget.hasXOP())
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32,
LoLo, HiHi, IndicesVec,
DAG.getConstant(0, DL, MVT::i8)));
// Permute Lo and Hi and then select based on index range.
// This works as VPERMILPS only uses index bits[0:1] to permute elements.
SDValue Res = DAG.getSelectCC(
DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
ISD::CondCode::SETGT);
return DAG.getBitcast(VT, Res);
}
break;
case MVT::v4i64:
case MVT::v4f64:
if (Subtarget.hasAVX512()) {
if (!Subtarget.hasVLX()) {
MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
SDLoc(SrcVec));
IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
DAG, SDLoc(IndicesVec));
SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
DAG, Subtarget);
return extract256BitVector(Res, 0, DAG, DL);
}
Opcode = X86ISD::VPERMV;
} else if (Subtarget.hasAVX()) {
SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
SDValue LoLo =
DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
SDValue HiHi =
DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
// VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
if (Subtarget.hasXOP())
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64,
LoLo, HiHi, IndicesVec,
DAG.getConstant(0, DL, MVT::i8)));
// Permute Lo and Hi and then select based on index range.
// This works as VPERMILPD only uses index bit[1] to permute elements.
SDValue Res = DAG.getSelectCC(
DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
ISD::CondCode::SETGT);
return DAG.getBitcast(VT, Res);
}
break;
case MVT::v64i8:
if (Subtarget.hasVBMI())
Opcode = X86ISD::VPERMV;
break;
case MVT::v32i16:
if (Subtarget.hasBWI())
Opcode = X86ISD::VPERMV;
break;
case MVT::v16f32:
case MVT::v16i32:
case MVT::v8f64:
case MVT::v8i64:
if (Subtarget.hasAVX512())
Opcode = X86ISD::VPERMV;
break;
}
if (!Opcode)
return SDValue();
assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
(VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
"Illegal variable permute shuffle type");
uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
if (Scale > 1)
IndicesVec = ScaleIndices(IndicesVec, Scale);
EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
SDValue Res = Opcode == X86ISD::VPERMV
? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
: DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
return DAG.getBitcast(VT, Res);
}
// Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
// reasoned to be a permutation of a vector by indices in a non-constant vector.
// (build_vector (extract_elt V, (extract_elt I, 0)),
// (extract_elt V, (extract_elt I, 1)),
// ...
// ->
// (vpermv I, V)
//
// TODO: Handle undefs
// TODO: Utilize pshufb and zero mask blending to support more efficient
// construction of vectors with constant-0 elements.
static SDValue
LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue SrcVec, IndicesVec;
// Check for a match of the permute source vector and permute index elements.
// This is done by checking that the i-th build_vector operand is of the form:
// (extract_elt SrcVec, (extract_elt IndicesVec, i)).
for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
SDValue Op = V.getOperand(Idx);
if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
// If this is the first extract encountered in V, set the source vector,
// otherwise verify the extract is from the previously defined source
// vector.
if (!SrcVec)
SrcVec = Op.getOperand(0);
else if (SrcVec != Op.getOperand(0))
return SDValue();
SDValue ExtractedIndex = Op->getOperand(1);
// Peek through extends.
if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
ExtractedIndex = ExtractedIndex.getOperand(0);
if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
// If this is the first extract from the index vector candidate, set the
// indices vector, otherwise verify the extract is from the previously
// defined indices vector.
if (!IndicesVec)
IndicesVec = ExtractedIndex.getOperand(0);
else if (IndicesVec != ExtractedIndex.getOperand(0))
return SDValue();
auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
if (!PermIdx || PermIdx->getZExtValue() != Idx)
return SDValue();
}
SDLoc DL(V);
MVT VT = V.getSimpleValueType();
return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
}
SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = Op.getNumOperands();
// Generate vectors for predicate vectors.
if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
return VectorConstant;
BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
return AddSub;
if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
return HorizontalOp;
if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
return Broadcast;
if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
return BitOp;
unsigned EVTBits = EltVT.getSizeInBits();
unsigned NumZero = 0;
unsigned NumNonZero = 0;
uint64_t NonZeros = 0;
bool IsAllConstants = true;
SmallSet<SDValue, 8> Values;
unsigned NumConstants = NumElems;
for (unsigned i = 0; i < NumElems; ++i) {
SDValue Elt = Op.getOperand(i);
if (Elt.isUndef())
continue;
Values.insert(Elt);
if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
IsAllConstants = false;
NumConstants--;
}
if (X86::isZeroNode(Elt))
NumZero++;
else {
assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
NonZeros |= ((uint64_t)1 << i);
NumNonZero++;
}
}
// All undef vector. Return an UNDEF. All zero vectors were handled above.
if (NumNonZero == 0)
return DAG.getUNDEF(VT);
// If we are inserting one variable into a vector of non-zero constants, try
// to avoid loading each constant element as a scalar. Load the constants as a
// vector and then insert the variable scalar element. If insertion is not
// supported, we assume that we will fall back to a shuffle to get the scalar
// blended with the constants. Insertion into a zero vector is handled as a
// special-case somewhere below here.
if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
(isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
// Create an all-constant vector. The variable element in the old
// build vector is replaced by undef in the constant vector. Save the
// variable scalar element and its index for use in the insertelement.
LLVMContext &Context = *DAG.getContext();
Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
SDValue VarElt;
SDValue InsIndex;
for (unsigned i = 0; i != NumElems; ++i) {
SDValue Elt = Op.getOperand(i);
if (auto *C = dyn_cast<ConstantSDNode>(Elt))
ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
else if (!Elt.isUndef()) {
assert(!VarElt.getNode() && !InsIndex.getNode() &&
"Expected one variable element in this vector");
VarElt = Elt;
InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
}
}
Constant *CV = ConstantVector::get(ConstVecOps);
SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
// The constants we just created may not be legal (eg, floating point). We
// must lower the vector right here because we can not guarantee that we'll
// legalize it before loading it. This is also why we could not just create
// a new build vector here. If the build vector contains illegal constants,
// it could get split back up into a series of insert elements.
// TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
MachineFunction &MF = DAG.getMachineFunction();
MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
}
// Special case for single non-zero, non-undef, element.
if (NumNonZero == 1) {
unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
// If we have a constant or non-constant insertion into the low element of
// a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
// the rest of the elements. This will be matched as movd/movq/movss/movsd
// depending on what the source datatype is.
if (Idx == 0) {
if (NumZero == 0)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
(EltVT == MVT::i64 && Subtarget.is64Bit())) {
assert((VT.is128BitVector() || VT.is256BitVector() ||
VT.is512BitVector()) &&
"Expected an SSE value type!");
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
}
// We can't directly insert an i8 or i16 into a vector, so zero extend
// it to i32 first.
if (EltVT == MVT::i16 || EltVT == MVT::i8) {
Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
if (VT.getSizeInBits() >= 256) {
MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
if (Subtarget.hasAVX()) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
} else {
// Without AVX, we need to extend to a 128-bit vector and then
// insert into the 256-bit vector.
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl);
Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl);
}
} else {
assert(VT.is128BitVector() && "Expected an SSE value type!");
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
}
return DAG.getBitcast(VT, Item);
}
}
// Is it a vector logical left shift?
if (NumElems == 2 && Idx == 1 &&
X86::isZeroNode(Op.getOperand(0)) &&
!X86::isZeroNode(Op.getOperand(1))) {
unsigned NumBits = VT.getSizeInBits();
return getVShift(true, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
VT, Op.getOperand(1)),
NumBits/2, DAG, *this, dl);
}
if (IsAllConstants) // Otherwise, it's better to do a constpool load.
return SDValue();
// Otherwise, if this is a vector with i32 or f32 elements, and the element
// is a non-constant being inserted into an element other than the low one,
// we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
// movd/movss) to move this into the low element, then shuffle it into
// place.
if (EVTBits == 32) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
}
}
// Splat is obviously ok. Let legalizer expand it to a shuffle.
if (Values.size() == 1) {
if (EVTBits == 32) {
// Instead of a shuffle like this:
// shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
// Check if it's possible to issue this instead.
// shuffle (vload ptr)), undef, <1, 1, 1, 1>
unsigned Idx = countTrailingZeros(NonZeros);
SDValue Item = Op.getOperand(Idx);
if (Op.getNode()->isOnlyUserOf(Item.getNode()))
return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
}
return SDValue();
}
// A vector full of immediates; various special cases are already
// handled, so this is best done with a single constant-pool load.
if (IsAllConstants)
return SDValue();
if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
return V;
// See if we can use a vector load to get all of the elements.
{
SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
if (SDValue LD =
EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
return LD;
}
// If this is a splat of pairs of 32-bit elements, we can use a narrower
// build_vector and broadcast it.
// TODO: We could probably generalize this more.
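// e.g. a v8i32 (a,b,a,b,a,b,a,b) becomes a v4i32 build_vector (a,b,u,u),
// bitcast to v2i64, broadcast to v4i64, and bitcast back to v8i32.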
if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
// Make sure all the even/odd operands match.
for (unsigned i = 2; i != NumElems; ++i)
if (Ops[i % 2] != Op.getOperand(i))
return false;
return true;
};
if (CanSplat(Op, NumElems, Ops)) {
MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
// Create a new build vector and cast to v2i64/v2f64.
SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
DAG.getBuildVector(NarrowVT, dl, Ops));
// Broadcast from v2i64/v2f64 and cast to final VT.
MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
NewBV));
}
}
// For AVX-length vectors, build the individual 128-bit pieces and use
// shuffles to put them in place.
if (VT.getSizeInBits() > 128) {
MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
// Build both the lower and upper subvector.
SDValue Lower =
DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
SDValue Upper = DAG.getBuildVector(
HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
// Recreate the wider vector with the lower and upper part.
return concatSubVectors(Lower, Upper, VT, NumElems, DAG, dl,
VT.getSizeInBits() / 2);
}
// Let legalizer expand 2-wide build_vectors.
if (EVTBits == 64) {
if (NumNonZero == 1) {
// One half is zero or undef.
unsigned Idx = countTrailingZeros(NonZeros);
SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
Op.getOperand(Idx));
return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
}
return SDValue();
}
// If element VT is < 32 bits, convert it to inserts into a zero vector.
if (EVTBits == 8 && NumElems == 16)
if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
DAG, Subtarget))
return V;
if (EVTBits == 16 && NumElems == 8)
if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
DAG, Subtarget))
return V;
// If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
if (EVTBits == 32 && NumElems == 4)
if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
return V;
// If element VT is == 32 bits, turn it into a number of shuffles.
if (NumElems == 4 && NumZero > 0) {
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < 4; ++i) {
bool isZero = !(NonZeros & (1ULL << i));
if (isZero)
Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
else
Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
}
for (unsigned i = 0; i < 2; ++i) {
switch ((NonZeros >> (i*2)) & 0x3) {
default: llvm_unreachable("Unexpected NonZero count");
case 0:
Ops[i] = Ops[i*2]; // Must be a zero vector.
break;
case 1:
Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
break;
case 2:
Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
case 3:
Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
break;
}
}
bool Reverse1 = (NonZeros & 0x3) == 2;
bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
int MaskVec[] = {
Reverse1 ? 1 : 0,
Reverse1 ? 0 : 1,
static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
static_cast<int>(Reverse2 ? NumElems : NumElems+1)
};
return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
}
assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
// Check for a build vector from mostly shuffle plus few inserting.
if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
return Sh;
// For SSE 4.1, use insertps to put the high elements into the low element.
if (Subtarget.hasSSE41()) {
SDValue Result;
if (!Op.getOperand(0).isUndef())
Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
else
Result = DAG.getUNDEF(VT);
for (unsigned i = 1; i < NumElems; ++i) {
if (Op.getOperand(i).isUndef()) continue;
Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
}
return Result;
}
// Otherwise, expand into a number of unpckl*; start by extending each of
// our (non-undef) elements to the full vector width with the element in the
// bottom slot of the vector (which generates no code for SSE).
SmallVector<SDValue, 8> Ops(NumElems);
for (unsigned i = 0; i < NumElems; ++i) {
if (!Op.getOperand(i).isUndef())
Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
else
Ops[i] = DAG.getUNDEF(VT);
}
// Next, we iteratively mix elements, e.g. for v4f32:
// Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
// : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
// Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
// Generate scaled UNPCKL shuffle mask.
SmallVector<int, 16> Mask;
for(unsigned i = 0; i != Scale; ++i)
Mask.push_back(i);
for (unsigned i = 0; i != Scale; ++i)
Mask.push_back(NumElems+i);
Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
}
return Ops[0];
}
// 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
// TODO: Detect subvector broadcast here instead of DAG combine?
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
MVT ResVT = Op.getSimpleValueType();
assert((ResVT.is256BitVector() ||
ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
unsigned NumOperands = Op.getNumOperands();
unsigned NumZero = 0;
unsigned NumNonZero = 0;
unsigned NonZeros = 0;
for (unsigned i = 0; i != NumOperands; ++i) {
SDValue SubVec = Op.getOperand(i);
if (SubVec.isUndef())
continue;
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
++NumZero;
else {
assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
NonZeros |= 1 << i;
++NumNonZero;
}
}
// If we have more than 2 non-zeros, build each half separately.
if (NumNonZero > 2) {
MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
ResVT.getVectorNumElements()/2);
ArrayRef<SDUse> Ops = Op->ops();
SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(0, NumOperands/2));
SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(NumOperands/2));
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
}
// Otherwise, build it up through insert_subvectors.
SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
: DAG.getUNDEF(ResVT);
MVT SubVT = Op.getOperand(0).getSimpleValueType();
unsigned NumSubElems = SubVT.getVectorNumElements();
for (unsigned i = 0; i != NumOperands; ++i) {
if ((NonZeros & (1 << i)) == 0)
continue;
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
Op.getOperand(i),
DAG.getIntPtrConstant(i * NumSubElems, dl));
}
return Vec;
}
// Return true if all the operands of the given CONCAT_VECTORS node are zeros
// except for the first one. (CONCAT_VECTORS Op, 0, 0,...,0)
static bool isExpandWithZeros(const SDValue &Op) {
assert(Op.getOpcode() == ISD::CONCAT_VECTORS &&
"Expand with zeros only possible in CONCAT_VECTORS nodes!");
for (unsigned i = 1; i < Op.getNumOperands(); i++)
if (!ISD::isBuildVectorAllZeros(Op.getOperand(i).getNode()))
return false;
return true;
}
// Returns the underlying node if the given node is a type promotion (by
// concatenating i1 zeros) of the result of a node that already zeroes all
// upper bits of the k-register; otherwise returns an empty SDValue.
static SDValue isTypePromotionOfi1ZeroUpBits(SDValue Op) {
unsigned Opc = Op.getOpcode();
assert(Opc == ISD::CONCAT_VECTORS &&
Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
"Unexpected node to check for type promotion!");
// As long as we are concatenating zeros to the upper part of a previous node
// result, climb up the tree until a node with a different opcode is
// encountered.
while (Opc == ISD::INSERT_SUBVECTOR || Opc == ISD::CONCAT_VECTORS) {
if (Opc == ISD::INSERT_SUBVECTOR) {
if (ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()) &&
Op.getConstantOperandVal(2) == 0)
Op = Op.getOperand(1);
else
return SDValue();
} else { // Opc == ISD::CONCAT_VECTORS
if (isExpandWithZeros(Op))
Op = Op.getOperand(0);
else
return SDValue();
}
Opc = Op.getOpcode();
}
// Check if the first inserted node zeroes the upper bits, or an 'and' result
// of a node that zeros the upper bits (its masked version).
if (isMaskedZeroUpperBitsvXi1(Op.getOpcode()) ||
(Op.getOpcode() == ISD::AND &&
(isMaskedZeroUpperBitsvXi1(Op.getOperand(0).getOpcode()) ||
isMaskedZeroUpperBitsvXi1(Op.getOperand(1).getOpcode())))) {
return Op;
}
return SDValue();
}
// TODO: Merge this with LowerAVXCONCAT_VECTORS?
static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG & DAG) {
SDLoc dl(Op);
MVT ResVT = Op.getSimpleValueType();
unsigned NumOperands = Op.getNumOperands();
assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
"Unexpected number of operands in CONCAT_VECTORS");
// If this node promotes - by concatenating zeroes - the type of the result
// of a node whose instruction zeroes all upper (irrelevant) bits of the
// output register, mark it as legal and catch the pattern in instruction
// selection to avoid emitting extra instructions (for zeroing upper bits).
if (SDValue Promoted = isTypePromotionOfi1ZeroUpBits(Op))
return widenSubVector(ResVT, Promoted, true, Subtarget, DAG, dl);
unsigned NumZero = 0;
unsigned NumNonZero = 0;
uint64_t NonZeros = 0;
for (unsigned i = 0; i != NumOperands; ++i) {
SDValue SubVec = Op.getOperand(i);
if (SubVec.isUndef())
continue;
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
++NumZero;
else {
assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
NonZeros |= (uint64_t)1 << i;
++NumNonZero;
}
}
// If there are zero or one non-zeros we can handle this very simply.
if (NumNonZero <= 1) {
SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
: DAG.getUNDEF(ResVT);
if (!NumNonZero)
return Vec;
unsigned Idx = countTrailingZeros(NonZeros);
SDValue SubVec = Op.getOperand(Idx);
unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
}
if (NumOperands > 2) {
MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
ResVT.getVectorNumElements()/2);
ArrayRef<SDUse> Ops = Op->ops();
SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(0, NumOperands/2));
SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
Ops.slice(NumOperands/2));
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
}
assert(NumNonZero == 2 && "Simple cases not handled?");
if (ResVT.getVectorNumElements() >= 16)
return Op; // The operation is legal with KUNPCK
SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
DAG.getUNDEF(ResVT), Op.getOperand(0),
DAG.getIntPtrConstant(0, dl));
unsigned NumElems = ResVT.getVectorNumElements();
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
DAG.getIntPtrConstant(NumElems/2, dl));
}
static SDValue LowerCONCAT_VECTORS(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT.getVectorElementType() == MVT::i1)
return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
(VT.is512BitVector() && (Op.getNumOperands() == 2 ||
Op.getNumOperands() == 4)));
// AVX can use the vinsertf128 instruction to create 256-bit vectors
// from two other 128-bit ones.
// A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
}
//===----------------------------------------------------------------------===//
// Vector shuffle lowering
//
// This is an experimental code path for lowering vector shuffles on x86. It is
// designed to handle arbitrary vector shuffles and blends, gracefully
// degrading performance as necessary. It works hard to recognize idiomatic
// shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
//===----------------------------------------------------------------------===//
/// Tiny helper function to identify a no-op mask.
///
/// This is a somewhat boring predicate function. It checks whether the mask
/// array input, which is assumed to be a single-input shuffle mask of the kind
/// used by the X86 shuffle instructions (not a fully general
/// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
/// in-place shuffle are 'no-op's.
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
assert(Mask[i] >= -1 && "Out of bound mask element!");
if (Mask[i] >= 0 && Mask[i] != i)
return false;
}
return true;
}
/// Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
///
/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
/// and we routinely test for these.
static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
int LaneSize = 128 / VT.getScalarSizeInBits();
int Size = Mask.size();
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
return true;
return false;
}
/// Test whether a shuffle mask is equivalent within each sub-lane.
///
/// This checks a shuffle mask to see if it is performing the same
/// lane-relative shuffle in each sub-lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
/// suitable for use with existing 128-bit shuffles as entries from the second
/// vector have been remapped to [LaneSize, 2*LaneSize).
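/// For example, the v8f32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats within
/// each 128-bit lane and produces the RepeatedMask <0, 5, 2, 7>.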
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
RepeatedMask.assign(LaneSize, -1);
int Size = Mask.size();
for (int i = 0; i < Size; ++i) {
assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
if (Mask[i] < 0)
continue;
if ((Mask[i] % Size) / LaneSize != i / LaneSize)
// This entry crosses lanes, so there is no way to model this shuffle.
return false;
// Ok, handle the in-lane shuffles by detecting if and when they repeat.
// Adjust second vector indices to start at LaneSize instead of Size.
int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
: Mask[i] % LaneSize + LaneSize;
if (RepeatedMask[i % LaneSize] < 0)
// This is the first non-undef entry in this slot of a 128-bit lane.
RepeatedMask[i % LaneSize] = LocalM;
else if (RepeatedMask[i % LaneSize] != LocalM)
// Found a mismatch with the repeated mask.
return false;
}
return true;
}
/// Test whether a shuffle mask is equivalent within each 128-bit lane.
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
SmallVector<int, 32> RepeatedMask;
return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}
/// Test whether a shuffle mask is equivalent within each 256-bit lane.
static bool
is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
}
/// Test whether a target shuffle mask is equivalent within each sub-lane.
/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
ArrayRef<int> Mask,
SmallVectorImpl<int> &RepeatedMask) {
int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
RepeatedMask.assign(LaneSize, SM_SentinelUndef);
int Size = Mask.size();
for (int i = 0; i < Size; ++i) {
assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
if (Mask[i] == SM_SentinelUndef)
continue;
if (Mask[i] == SM_SentinelZero) {
if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
return false;
RepeatedMask[i % LaneSize] = SM_SentinelZero;
continue;
}
if ((Mask[i] % Size) / LaneSize != i / LaneSize)
// This entry crosses lanes, so there is no way to model this shuffle.
return false;
// Ok, handle the in-lane shuffles by detecting if and when they repeat.
// Adjust second vector indices to start at LaneSize instead of Size.
int LocalM =
Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
// This is the first non-undef entry in this slot of a 128-bit lane.
RepeatedMask[i % LaneSize] = LocalM;
else if (RepeatedMask[i % LaneSize] != LocalM)
// Found a mismatch with the repeated mask.
return false;
}
return true;
}
/// Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
/// if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
///
/// It returns true if the mask is exactly as wide as the argument list, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the argument.
static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
ArrayRef<int> ExpectedMask) {
if (Mask.size() != ExpectedMask.size())
return false;
int Size = Mask.size();
// If the values are build vectors, we can look through them to find
// equivalent inputs that make the shuffles equivalent.
auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
for (int i = 0; i < Size; ++i) {
assert(Mask[i] >= -1 && "Out of bound mask element!");
if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
if (!MaskBV || !ExpectedBV ||
MaskBV->getOperand(Mask[i] % Size) !=
ExpectedBV->getOperand(ExpectedMask[i] % Size))
return false;
}
}
return true;
}
/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
///
/// The masks must be exactly the same width.
///
/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
/// value in ExpectedMask is always accepted. Otherwise the indices must match.
///
/// SM_SentinelZero is accepted as a valid negative index but must match in
/// both masks.
static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
ArrayRef<int> ExpectedMask) {
int Size = Mask.size();
if (Size != (int)ExpectedMask.size())
return false;
for (int i = 0; i < Size; ++i)
if (Mask[i] == SM_SentinelUndef)
continue;
else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
return false;
else if (Mask[i] != ExpectedMask[i])
return false;
return true;
}
// Merges a general DAG shuffle mask and zeroable bit mask into a target shuffle
// mask.
static SmallVector<int, 64> createTargetShuffleMask(ArrayRef<int> Mask,
const APInt &Zeroable) {
int NumElts = Mask.size();
assert(NumElts == (int)Zeroable.getBitWidth() && "Mismatch mask sizes");
SmallVector<int, 64> TargetMask(NumElts, SM_SentinelUndef);
for (int i = 0; i != NumElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef)
continue;
assert(0 <= M && M < (2 * NumElts) && "Out of range shuffle index");
TargetMask[i] = (Zeroable[i] ? SM_SentinelZero : M);
}
return TargetMask;
}
// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
// instructions.
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
if (VT != MVT::v8i32 && VT != MVT::v8f32)
return false;
SmallVector<int, 8> Unpcklwd;
createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
/* Unary = */ false);
SmallVector<int, 8> Unpckhwd;
createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
/* Unary = */ false);
bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
isTargetShuffleEquivalent(Mask, Unpckhwd));
return IsUnpackwdMask;
}
/// Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
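/// For example, the mask <3, 1, 2, 0> encodes as 0b00100111 (0x27): two bits
/// per lane, with the lowest lane in the least significant bits.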
static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
unsigned Imm = 0;
Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
return Imm;
}
static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
SelectionDAG &DAG) {
return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}
/// Compute whether each element of a shuffle is zeroable.
///
/// A "zeroable" vector shuffle element is one which can be lowered to zero.
/// Either it is an undef element in the shuffle mask, the element of the input
/// referenced is undef, or the element of the input referenced is known to be
/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
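/// For example, with a 4-element mask <0, 5, 1, 4> and V2 an all-zeros
/// build_vector, the elements at positions 1 and 3 are zeroable.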
static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
SDValue V1, SDValue V2) {
APInt Zeroable(Mask.size(), 0);
V1 = peekThroughBitcasts(V1);
V2 = peekThroughBitcasts(V2);
bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
int VectorSizeInBits = V1.getValueSizeInBits();
int ScalarSizeInBits = VectorSizeInBits / Mask.size();
assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
int M = Mask[i];
// Handle the easy cases.
if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
Zeroable.setBit(i);
continue;
}
// Determine shuffle input and normalize the mask.
SDValue V = M < Size ? V1 : V2;
M %= Size;
// Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
if (V.getOpcode() != ISD::BUILD_VECTOR)
continue;
// If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
// the (larger) source element must be UNDEF/ZERO.
if ((Size % V.getNumOperands()) == 0) {
int Scale = Size / V->getNumOperands();
SDValue Op = V.getOperand(M / Scale);
if (Op.isUndef() || X86::isZeroNode(Op))
Zeroable.setBit(i);
else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
APInt Val = Cst->getAPIntValue();
Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
Val = Val.getLoBits(ScalarSizeInBits);
if (Val == 0)
Zeroable.setBit(i);
} else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
APInt Val = Cst->getValueAPF().bitcastToAPInt();
Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
Val = Val.getLoBits(ScalarSizeInBits);
if (Val == 0)
Zeroable.setBit(i);
}
continue;
}
// If the BUILD_VECTOR has more elements, then all the (smaller) source
// elements must be UNDEF or ZERO.
if ((V.getNumOperands() % Size) == 0) {
int Scale = V->getNumOperands() / Size;
bool AllZeroable = true;
for (int j = 0; j < Scale; ++j) {
SDValue Op = V.getOperand((M * Scale) + j);
AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
}
if (AllZeroable)
Zeroable.setBit(i);
continue;
}
}
return Zeroable;
}
// The shuffle result is as follows:
// 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in
// ascending order. Each of Zeroable's elements corresponds to a particular
// Mask element, as described in the computeZeroableShuffleElements function.
//
// The function looks for a sub-mask whose nonzero elements are in increasing
// order. If such a sub-mask exists, the function returns true.
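// For example, for v4i32 with Zeroable covering elements 0 and 2, the mask
// <4, 0, 6, 1> matches: the non-zeroable positions select elements 0 and 1
// in increasing order.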
static bool isNonZeroElementsInOrder(const APInt &Zeroable,
ArrayRef<int> Mask, const EVT &VectorType,
bool &IsZeroSideLeft) {
int NextElement = -1;
// Check if the Mask's nonzero elements are in increasing order.
for (int i = 0, e = Mask.size(); i < e; i++) {
// Check that the mask's zero elements are built from only zeros.
assert(Mask[i] >= -1 && "Out of bound mask element!");
if (Mask[i] < 0)
return false;
if (Zeroable[i])
continue;
// Find the lowest non-zero element.
if (NextElement < 0) {
NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
IsZeroSideLeft = NextElement != 0;
}
// Exit if the mask's non-zero elements are not in increasing order.
if (NextElement != Mask[i])
return false;
NextElement++;
}
return true;
}
/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
int Size = Mask.size();
int LaneSize = 128 / VT.getScalarSizeInBits();
const int NumBytes = VT.getSizeInBits() / 8;
const int NumEltBytes = VT.getScalarSizeInBits() / 8;
assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
(Subtarget.hasAVX2() && VT.is256BitVector()) ||
(Subtarget.hasBWI() && VT.is512BitVector()));
SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
// Sign bit set in i8 mask means zero element.
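// (PSHUFB writes zero to any destination byte whose control byte has its
// high bit set.)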
SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
SDValue V;
for (int i = 0; i < NumBytes; ++i) {
int M = Mask[i / NumEltBytes];
if (M < 0) {
PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
continue;
}
if (Zeroable[i / NumEltBytes]) {
PSHUFBMask[i] = ZeroMask;
continue;
}
// We can only use a single input of V1 or V2.
SDValue SrcV = (M >= Size ? V2 : V1);
if (V && V != SrcV)
return SDValue();
V = SrcV;
M %= Size;
// PSHUFB can't cross lanes; ensure this doesn't happen.
if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
return SDValue();
M = M % LaneSize;
M = M * NumEltBytes + (i % NumEltBytes);
PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
}
assert(V && "Failed to find a source input");
MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
}
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
const SDLoc &dl);
// X86 has a dedicated shuffle that can be lowered to VEXPAND.
static SDValue lowerVectorShuffleToEXPAND(const SDLoc &DL, MVT VT,
const APInt &Zeroable,
ArrayRef<int> Mask, SDValue &V1,
SDValue &V2, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
bool IsLeftZeroSide = true;
if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
IsLeftZeroSide))
return SDValue();
unsigned VEXPANDMask = (~Zeroable).getZExtValue();
MVT IntegerType =
MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
unsigned NumElts = VT.getVectorNumElements();
assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
"Unexpected number of vector elements");
SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
Subtarget, DAG, DL);
SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
return DAG.getSelect(DL, VT, VMask,
DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector),
ZeroVector);
}
static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
unsigned &UnpackOpcode, bool IsUnary,
ArrayRef<int> TargetMask,
const SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
int NumElts = VT.getVectorNumElements();
bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
for (int i = 0; i != NumElts; i += 2) {
int M1 = TargetMask[i + 0];
int M2 = TargetMask[i + 1];
Undef1 &= (SM_SentinelUndef == M1);
Undef2 &= (SM_SentinelUndef == M2);
Zero1 &= isUndefOrZero(M1);
Zero2 &= isUndefOrZero(M2);
}
assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
"Zeroable shuffle detected");
// Attempt to match the target mask against the unpack lo/hi mask patterns.
SmallVector<int, 64> Unpckl, Unpckh;
createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
UnpackOpcode = X86ISD::UNPCKL;
V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
return true;
}
createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
UnpackOpcode = X86ISD::UNPCKH;
V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
return true;
}
// If a unary shuffle, attempt to match as an unpack lo/hi with zero.
if (IsUnary && (Zero1 || Zero2)) {
// Don't bother if we can blend instead.
if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
return false;
bool MatchLo = true, MatchHi = true;
for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
int M = TargetMask[i];
// Ignore if the input is known to be zero or the index is undef.
if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
(M == SM_SentinelUndef))
continue;
MatchLo &= (M == Unpckl[i]);
MatchHi &= (M == Unpckh[i]);
}
if (MatchLo || MatchHi) {
UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
return true;
}
}
// If a binary shuffle, commute and try again.
if (!IsUnary) {
ShuffleVectorSDNode::commuteMask(Unpckl);
if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
UnpackOpcode = X86ISD::UNPCKL;
std::swap(V1, V2);
return true;
}
ShuffleVectorSDNode::commuteMask(Unpckh);
if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
UnpackOpcode = X86ISD::UNPCKH;
std::swap(V1, V2);
return true;
}
}
return false;
}
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
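// For example, the v4i32 mask <0, 4, 1, 5> interleaves the low halves of V1
// and V2 and matches UNPCKL, while <2, 6, 3, 7> interleaves the high halves
// and matches UNPCKH.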
static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
SmallVector<int, 8> Unpckl;
createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
SmallVector<int, 8> Unpckh;
createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
// Commute and try again.
ShuffleVectorSDNode::commuteMask(Unpckl);
if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
ShuffleVectorSDNode::commuteMask(Unpckh);
if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
return SDValue();
}
static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
int Delta) {
int Size = (int)Mask.size();
int Split = Size / Delta;
int TruncatedVectorStart = SwappedOps ? Size : 0;
// Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
return false;
// The rest of the mask should not refer to the truncated vector's elements.
if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
TruncatedVectorStart + Size))
return false;
return true;
}
// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
//
// An example is the following:
//
// t0: ch = EntryToken
// t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
// t25: v4i32 = truncate t2
// t41: v8i16 = bitcast t25
// t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
// Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
// t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
// t18: v2i64 = bitcast t51
//
// Without avx512vl, this is lowered to:
//
// vpmovqd %zmm0, %ymm0
// vpshufb {{.*#+}} xmm0 =
// xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
//
// But when avx512vl is available, one can just use a single vpmovdw
// instruction.
static SDValue lowerVectorShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (VT != MVT::v16i8 && VT != MVT::v8i16)
return SDValue();
if (Mask.size() != VT.getVectorNumElements())
return SDValue();
bool SwappedOps = false;
if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
if (!ISD::isBuildVectorAllZeros(V1.getNode()))
return SDValue();
std::swap(V1, V2);
SwappedOps = true;
}
// Look for:
//
// bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
// bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
//
// and similar ones.
if (V1.getOpcode() != ISD::BITCAST)
return SDValue();
if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
return SDValue();
SDValue Src = V1.getOperand(0).getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
// The vptrunc** instructions truncating 128-bit and 256-bit vectors
// are only available with avx512vl.
if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
return SDValue();
// Down-converting word to byte is only available with avx512bw. The case with
// 256-bit output doesn't contain a shuffle and is therefore not handled here.
if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
!Subtarget.hasBWI())
return SDValue();
// The first half/quarter of the mask should refer to every second/fourth
// element of the vector truncated and bitcasted.
if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
return SDValue();
return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
}
// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
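// For example, the v8i16 mask <0, 2, 4, 6, 8, 10, 12, 14> selects the even
// i16 elements of the concatenated v4i32 inputs, which is what
// PACKSSDW/PACKUSDW produce when every i32 element fits in the result type.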
static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
SDValue &V2, unsigned &PackOpcode,
ArrayRef<int> TargetMask,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
unsigned NumElts = VT.getVectorNumElements();
unsigned BitSize = VT.getScalarSizeInBits();
MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
auto MatchPACK = [&](SDValue N1, SDValue N2) {
SDValue VV1 = DAG.getBitcast(PackVT, N1);
SDValue VV2 = DAG.getBitcast(PackVT, N2);
if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
(N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
V1 = VV1;
V2 = VV2;
SrcVT = PackVT;
PackOpcode = X86ISD::PACKUS;
return true;
}
}
if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
(N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
V1 = VV1;
V2 = VV2;
SrcVT = PackVT;
PackOpcode = X86ISD::PACKSS;
return true;
}
return false;
};
// Try binary shuffle.
SmallVector<int, 32> BinaryMask;
createPackShuffleMask(VT, BinaryMask, false);
if (isTargetShuffleEquivalent(TargetMask, BinaryMask))
if (MatchPACK(V1, V2))
return true;
// Try unary shuffle.
SmallVector<int, 32> UnaryMask;
createPackShuffleMask(VT, UnaryMask, true);
if (isTargetShuffleEquivalent(TargetMask, UnaryMask))
if (MatchPACK(V1, V1))
return true;
return false;
}
static SDValue lowerVectorShuffleWithPACK(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT PackVT;
unsigned PackOpcode;
if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
Subtarget))
return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
DAG.getBitcast(PackVT, V2));
return SDValue();
}
/// Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
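/// For example, the v4i32 mask <0, zz, 2, zz> becomes a single
/// AND(V1, <-1, 0, -1, 0>).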
static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
SelectionDAG &DAG) {
assert(!VT.isFloatingPoint() && "Floating point types are not supported");
MVT EltVT = VT.getVectorElementType();
SDValue Zero = DAG.getConstant(0, DL, EltVT);
SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
SDValue V;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Zeroable[i])
continue;
if (Mask[i] % Size != i)
return SDValue(); // Not a blend.
if (!V)
V = Mask[i] < Size ? V1 : V2;
else if (V != (Mask[i] < Size ? V1 : V2))
return SDValue(); // Can only let one input through the mask.
VMaskOps[i] = AllOnes;
}
if (!V)
return SDValue(); // No non-zeroable elements!
SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
return DAG.getNode(ISD::AND, DL, VT, V, VMask);
}
/// Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
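/// For example, the v4i32 blend <0, 5, 2, 7> becomes
/// OR(AND(V1, M), ANDNP(M, V2)) with M = <-1, 0, -1, 0>.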
static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.isInteger() && "Only supports integer vector types!");
MVT EltVT = VT.getVectorElementType();
SDValue Zero = DAG.getConstant(0, DL, EltVT);
SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
SmallVector<SDValue, 16> MaskOps;
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
return SDValue(); // Shuffled input!
MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
}
SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
// We have to cast V2 around.
MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
DAG.getBitcast(MaskVT, V1Mask),
DAG.getBitcast(MaskVT, V2)));
return DAG.getNode(ISD::OR, DL, VT, V1, V2);
}
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget &Subtarget,
SelectionDAG &DAG);
static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
MutableArrayRef<int> TargetMask,
bool &ForceV1Zero, bool &ForceV2Zero,
uint64_t &BlendMask) {
bool V1IsZeroOrUndef =
V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
bool V2IsZeroOrUndef =
V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
BlendMask = 0;
ForceV1Zero = false, ForceV2Zero = false;
assert(TargetMask.size() <= 64 && "Shuffle mask too big for blend mask");
// Attempt to generate the binary blend mask. If an input is zero then
// we can use any lane.
// TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
int M = TargetMask[i];
if (M == SM_SentinelUndef)
continue;
if (M == i)
continue;
if (M == i + Size) {
BlendMask |= 1ull << i;
continue;
}
if (M == SM_SentinelZero) {
if (V1IsZeroOrUndef) {
ForceV1Zero = true;
TargetMask[i] = i;
continue;
}
if (V2IsZeroOrUndef) {
ForceV2Zero = true;
BlendMask |= 1ull << i;
TargetMask[i] = i + Size;
continue;
}
}
return false;
}
return true;
}
static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
int Scale) {
uint64_t ScaledMask = 0;
for (int i = 0; i != Size; ++i)
if (BlendMask & (1ull << i))
ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
return ScaledMask;
}
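// For example, scaling the 4-bit blend mask 0b0101 by 2 yields 0b00110011;
// each selected element expands to two adjacent lanes.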
/// Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Original,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallVector<int, 64> Mask = createTargetShuffleMask(Original, Zeroable);
uint64_t BlendMask = 0;
bool ForceV1Zero = false, ForceV2Zero = false;
if (!matchVectorShuffleAsBlend(V1, V2, Mask, ForceV1Zero, ForceV2Zero,
BlendMask))
return SDValue();
// Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
if (ForceV1Zero)
V1 = getZeroVector(VT, Subtarget, DAG, DL);
if (ForceV2Zero)
V2 = getZeroVector(VT, Subtarget, DAG, DL);
switch (VT.SimpleTy) {
case MVT::v2f64:
case MVT::v4f32:
case MVT::v4f64:
case MVT::v8f32:
return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8));
case MVT::v4i64:
case MVT::v8i32:
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
LLVM_FALLTHROUGH;
case MVT::v2i64:
case MVT::v4i32:
// If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
// that instruction.
if (Subtarget.hasAVX2()) {
// Scale the blend by the number of 32-bit dwords per element.
int Scale = VT.getScalarSizeInBits() / 32;
BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
V1 = DAG.getBitcast(BlendVT, V1);
V2 = DAG.getBitcast(BlendVT, V2);
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8)));
}
LLVM_FALLTHROUGH;
case MVT::v8i16: {
// For integer shuffles we need to expand the mask and cast the inputs to
// v8i16s prior to blending.
int Scale = 8 / VT.getVectorNumElements();
BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
V1 = DAG.getBitcast(MVT::v8i16, V1);
V2 = DAG.getBitcast(MVT::v8i16, V2);
return DAG.getBitcast(VT,
DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8)));
}
case MVT::v16i16: {
assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
// We can lower these with PBLENDW which is mirrored across 128-bit lanes.
assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
BlendMask = 0;
for (int i = 0; i < 8; ++i)
if (RepeatedMask[i] >= 8)
BlendMask |= 1ull << i;
return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
DAG.getConstant(BlendMask, DL, MVT::i8));
}
LLVM_FALLTHROUGH;
}
case MVT::v16i8:
case MVT::v32i8: {
assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
"256-bit byte-blends require AVX2 support!");
if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
MVT IntegerType =
MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
}
// Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
if (SDValue Masked =
lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG))
return Masked;
// Scale the blend by the number of bytes per element.
int Scale = VT.getScalarSizeInBits() / 8;
// This form of blend is always done on bytes. Compute the byte vector
// type.
MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
// Compute the VSELECT mask. Note that VSELECT is really confusing in the
// mix of LLVM's code generator and the x86 backend. We tell the code
// generator that boolean values in the elements of an x86 vector register
// are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
// mapping a select to operand #1, and 'false' mapping to operand #2. The
// reality in x86 is that vector masks (pre-AVX-512) use only the high bit
// of the element (the remaining are ignored) and 0 in that high bit would
// mean operand #1 while 1 in the high bit would mean operand #2. So while
// the LLVM model for boolean values in vector elements gets the relevant
// bit set, it is set backwards and over constrained relative to x86's
// actual model.
SmallVector<SDValue, 32> VSELECTMask;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
for (int j = 0; j < Scale; ++j)
VSELECTMask.push_back(
Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
: DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
MVT::i8));
V1 = DAG.getBitcast(BlendVT, V1);
V2 = DAG.getBitcast(BlendVT, V2);
return DAG.getBitcast(
VT,
DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
V1, V2));
}
case MVT::v16f32:
case MVT::v8f64:
case MVT::v8i64:
case MVT::v16i32:
case MVT::v32i16:
case MVT::v64i8: {
MVT IntegerType =
MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
}
default:
llvm_unreachable("Not a supported integer vector type!");
}
}
/// Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
// We build up the blend mask while checking whether a blend is a viable way
// to reduce the shuffle.
SmallVector<int, 32> BlendMask(Mask.size(), -1);
SmallVector<int, 32> PermuteMask(Mask.size(), -1);
for (int i = 0, Size = Mask.size(); i < Size; ++i) {
if (Mask[i] < 0)
continue;
assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
if (BlendMask[Mask[i] % Size] < 0)
BlendMask[Mask[i] % Size] = Mask[i];
else if (BlendMask[Mask[i] % Size] != Mask[i])
return SDValue(); // Can't blend in the needed input!
PermuteMask[i] = Mask[i] % Size;
}
SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}
/// Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends.
static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(const SDLoc &DL,
MVT VT, SDValue V1,
SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
// Shuffle the input elements into the desired positions in V1 and V2 and
// blend them together.
SmallVector<int, 32> V1Mask(Mask.size(), -1);
SmallVector<int, 32> V2Mask(Mask.size(), -1);
SmallVector<int, 32> BlendMask(Mask.size(), -1);
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (Mask[i] >= 0 && Mask[i] < Size) {
V1Mask[i] = Mask[i];
BlendMask[i] = i;
} else if (Mask[i] >= Size) {
V2Mask[i] = Mask[i] - Size;
BlendMask[i] = i + Size;
}
// Try to lower with the simpler initial blend strategy unless one of the
// input shuffles would be a no-op. We prefer to shuffle inputs as the
// shuffle may be able to fold with a load or other benefit. However, when
// we'll have to do 2x as many shuffles in order to achieve this, blending
// first is a better strategy.
if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
if (SDValue BlendPerm =
lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
return BlendPerm;
V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
}
/// Try to lower a vector shuffle as a rotation.
///
/// This is used to support PALIGNR for SSSE3 or VALIGND/VALIGNQ for AVX512.
static int matchVectorShuffleAsRotate(SDValue &V1, SDValue &V2,
ArrayRef<int> Mask) {
int NumElts = Mask.size();
// We need to detect various ways of spelling a rotation:
// [11, 12, 13, 14, 15, 0, 1, 2]
// [-1, 12, 13, 14, -1, -1, 1, -1]
// [-1, -1, -1, -1, -1, -1, 1, 2]
// [ 3, 4, 5, 6, 7, 8, 9, 10]
// [-1, 4, 5, 6, -1, -1, 9, -1]
// [-1, 4, 5, 6, -1, -1, -1, -1]
int Rotation = 0;
SDValue Lo, Hi;
for (int i = 0; i < NumElts; ++i) {
int M = Mask[i];
assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
"Unexpected mask index.");
if (M < 0)
continue;
// Determine where a rotated vector would have started.
int StartIdx = i - (M % NumElts);
if (StartIdx == 0)
// The identity rotation isn't interesting, stop.
return -1;
// If we found the tail of a vector, the rotation is the size of the missing
// front; if we found the head of a vector, the rotation is the size of that
// head.
int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
if (Rotation == 0)
Rotation = CandidateRotation;
else if (Rotation != CandidateRotation)
// The rotations don't match, so we can't match this mask.
return -1;
// Compute which value this mask is pointing at.
SDValue MaskV = M < NumElts ? V1 : V2;
// Compute which of the two target values this index should be assigned
// to. This reflects whether the high elements are remaining or the low
// elements are remaining.
SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
// Either set up this value if we've not encountered it before, or check
// that it remains consistent.
if (!TargetV)
TargetV = MaskV;
else if (TargetV != MaskV)
// This may be a rotation, but it pulls from the inputs in some
// unsupported interleaving.
return -1;
}
// Check that we successfully analyzed the mask, and normalize the results.
assert(Rotation != 0 && "Failed to locate a viable rotation!");
assert((Lo || Hi) && "Failed to find a rotated input vector!");
if (!Lo)
Lo = Hi;
else if (!Hi)
Hi = Lo;
V1 = Lo;
V2 = Hi;
return Rotation;
}
/// Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
/// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
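///
/// For the v8i16 example above the rotation is 3 elements, which scales to a
/// PALIGNR immediate of 6 bytes (2 bytes per element).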
static int matchVectorShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
ArrayRef<int> Mask) {
// Don't accept any shuffles with zero elements.
if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
return -1;
// PALIGNR works on 128-bit lanes.
SmallVector<int, 16> RepeatedMask;
if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
return -1;
int Rotation = matchVectorShuffleAsRotate(V1, V2, RepeatedMask);
if (Rotation <= 0)
return -1;
// PALIGNR rotates bytes, so we need to scale the
// rotation based on how many bytes are in the vector lane.
int NumElts = RepeatedMask.size();
int Scale = 16 / NumElts;
return Rotation * Scale;
}
static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
SDValue Lo = V1, Hi = V2;
int ByteRotation = matchVectorShuffleAsByteRotate(VT, Lo, Hi, Mask);
if (ByteRotation <= 0)
return SDValue();
// Cast the inputs to an i8 vector of the correct length to match PALIGNR or
// PSLLDQ/PSRLDQ.
MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
Lo = DAG.getBitcast(ByteVT, Lo);
Hi = DAG.getBitcast(ByteVT, Hi);
// SSSE3 targets can use the palignr instruction.
if (Subtarget.hasSSSE3()) {
assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
"512-bit PALIGNR requires BWI instructions");
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
DAG.getConstant(ByteRotation, DL, MVT::i8)));
}
assert(VT.is128BitVector() &&
"Rotate-based lowering only supports 128-bit lowering!");
assert(Mask.size() <= 16 &&
"Can shuffle at most 16 bytes in a 128-bit vector!");
assert(ByteVT == MVT::v16i8 &&
"SSE2 rotate lowering only needed for v16i8!");
// Default SSE2 implementation
int LoByteShift = 16 - ByteRotation;
int HiByteShift = ByteRotation;
SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
DAG.getConstant(LoByteShift, DL, MVT::i8));
SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
DAG.getConstant(HiByteShift, DL, MVT::i8));
return DAG.getBitcast(VT,
DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}
/// Try to lower a vector shuffle as a dword/qword rotation.
///
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static SDValue lowerVectorShuffleAsRotate(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
"Only 32-bit and 64-bit elements are supported!");
// 128/256-bit vectors are only supported with VLX.
assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
&& "VLX required for 128/256-bit vectors");
SDValue Lo = V1, Hi = V2;
int Rotation = matchVectorShuffleAsRotate(Lo, Hi, Mask);
if (Rotation <= 0)
return SDValue();
return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
DAG.getConstant(Rotation, DL, MVT::i8));
}
/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSLL : (little-endian) left bit shift.
/// [ zz, 0, zz, 2 ]
/// [ -1, 4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [ 1, zz, 3, zz]
/// [ -1, -1, 7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz, 0, 1, 2, 3, 4, 5, 6]
/// [ zz, zz, -1, -1, 2, 3, 4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1, 1]
/// PSRLDQ : (little-endian) right byte shift
/// [ 5, 6, 7, zz, zz, zz, zz, zz]
/// [ -1, 5, 6, 7, zz, zz, zz, zz]
/// [ 1, 2, -1, -1, -1, -1, zz, zz]
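///
/// For example, the v4i32 mask [ zz, 0, zz, 2 ] matches a VSHLI of the v2i64
/// view of the input by 32 bits (Scale = 2, Shift = 1).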
static int matchVectorShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
unsigned ScalarSizeInBits,
ArrayRef<int> Mask, int MaskOffset,
const APInt &Zeroable,
const X86Subtarget &Subtarget) {
int Size = Mask.size();
unsigned SizeInBits = Size * ScalarSizeInBits;
auto CheckZeros = [&](int Shift, int Scale, bool Left) {
for (int i = 0; i < Size; i += Scale)
for (int j = 0; j < Shift; ++j)
if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
return false;
return true;
};
auto MatchShift = [&](int Shift, int Scale, bool Left) {
for (int i = 0; i != Size; i += Scale) {
unsigned Pos = Left ? i + Shift : i;
unsigned Low = Left ? i : i + Shift;
unsigned Len = Scale - Shift;
if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
return -1;
}
int ShiftEltBits = ScalarSizeInBits * Scale;
bool ByteShift = ShiftEltBits > 64;
Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
: (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
// Normalize the scale for byte shifts to still produce an i64 element
// type.
Scale = ByteShift ? Scale / 2 : Scale;
// We need to round trip through the appropriate type for the shift.
MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
: MVT::getVectorVT(ShiftSVT, Size / Scale);
return (int)ShiftAmt;
};
// SSE/AVX supports logical shifts up to 64-bit integers - so we can just
// keep doubling the size of the integer elements up to that. We can
// then shift the elements of the integer vector by whole multiples of
// their width within the elements of the larger integer vector. Test each
// multiple to see if we can find a match with the moved element indices
// and that the shifted in elements are all zeroable.
unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
for (int Shift = 1; Shift != Scale; ++Shift)
for (bool Left : {true, false})
if (CheckZeros(Shift, Scale, Left)) {
int ShiftAmt = MatchShift(Shift, Scale, Left);
if (0 < ShiftAmt)
return ShiftAmt;
}
// no match
return -1;
}
static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
int Size = Mask.size();
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
MVT ShiftVT;
SDValue V = V1;
unsigned Opcode;
// Try to match shuffle against V1 shift.
int ShiftAmt = matchVectorShuffleAsShift(
ShiftVT, Opcode, VT.getScalarSizeInBits(), Mask, 0, Zeroable, Subtarget);
// If V1 failed, try to match shuffle against V2 shift.
if (ShiftAmt < 0) {
ShiftAmt =
matchVectorShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
Mask, Size, Zeroable, Subtarget);
V = V2;
}
if (ShiftAmt < 0)
return SDValue();
assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
"Illegal integer vector type");
V = DAG.getBitcast(ShiftVT, V);
V = DAG.getNode(Opcode, DL, ShiftVT, V,
DAG.getConstant(ShiftAmt, DL, MVT::i8));
return DAG.getBitcast(VT, V);
}
// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
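// For example, the v8i16 mask <2, 3, zz, zz, -1, -1, -1, -1> matches with
// Idx = 2 and Len = 2, giving BitIdx = 32 and BitLen = 32.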
static bool matchVectorShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
ArrayRef<int> Mask, uint64_t &BitLen,
uint64_t &BitIdx, const APInt &Zeroable) {
int Size = Mask.size();
int HalfSize = Size / 2;
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
// Upper half must be undefined.
if (!isUndefInRange(Mask, HalfSize, HalfSize))
return false;
// Determine the extraction length from the part of the
// lower half that isn't zeroable.
int Len = HalfSize;
for (; Len > 0; --Len)
if (!Zeroable[Len - 1])
break;
assert(Len > 0 && "Zeroable shuffle mask");
// Attempt to match first Len sequential elements from the lower half.
SDValue Src;
int Idx = -1;
for (int i = 0; i != Len; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef)
continue;
SDValue &V = (M < Size ? V1 : V2);
M = M % Size;
// The extracted elements must start at a valid index and all mask
// elements must be in the lower half.
if (i > M || M >= HalfSize)
return false;
if (Idx < 0 || (Src == V && Idx == (M - i))) {
Src = V;
Idx = M - i;
continue;
}
return false;
}
if (!Src || Idx < 0)
return false;
assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
V1 = Src;
return true;
}
// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
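// For example, the v8i16 mask <0, 8, 9, 3, -1, -1, -1, -1> matches with
// Idx = 1 and Len = 2 (BitIdx = 16, BitLen = 32): V2's low two elements are
// inserted over V1 starting at element 1.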
static bool matchVectorShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
ArrayRef<int> Mask, uint64_t &BitLen,
uint64_t &BitIdx) {
int Size = Mask.size();
int HalfSize = Size / 2;
assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
// Upper half must be undefined.
if (!isUndefInRange(Mask, HalfSize, HalfSize))
return false;
for (int Idx = 0; Idx != HalfSize; ++Idx) {
SDValue Base;
// Attempt to match first source from mask before insertion point.
if (isUndefInRange(Mask, 0, Idx)) {
/* EMPTY */
} else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
Base = V1;
} else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
Base = V2;
} else {
continue;
}
// Extend the extraction length looking to match both the insertion of
// the second source and the remaining elements of the first.
for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
SDValue Insert;
int Len = Hi - Idx;
// Match insertion.
if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
Insert = V1;
} else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
Insert = V2;
} else {
continue;
}
// Match the remaining elements of the lower half.
if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
/* EMPTY */
} else if ((!Base || (Base == V1)) &&
isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
Base = V1;
} else if ((!Base || (Base == V2)) &&
isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
Size + Hi)) {
Base = V2;
} else {
continue;
}
BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
V1 = Base;
V2 = Insert;
return true;
}
}
return false;
}
/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
SelectionDAG &DAG) {
uint64_t BitLen, BitIdx;
if (matchVectorShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
if (matchVectorShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
V2 ? V2 : DAG.getUNDEF(VT),
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
return SDValue();
}
/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and
/// can start from an offset element index in the input; to avoid excess
/// shuffling the offset must either be in the bottom lane or at the start
/// of a higher lane. All extended elements must be from
/// the same lane.
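///
/// For example, the v16i8 mask
/// <0, zz, 1, zz, 2, zz, 3, zz, 4, zz, 5, zz, 6, zz, 7, zz> is a zero
/// extension of the low eight bytes to i16 (Scale = 2, Offset = 0).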
static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(Scale > 1 && "Need a scale to extend.");
int EltBits = VT.getScalarSizeInBits();
int NumElements = VT.getVectorNumElements();
int NumEltsPerLane = 128 / EltBits;
int OffsetLane = Offset / NumEltsPerLane;
assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
"Only 8, 16, and 32 bit elements can be extended.");
assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
assert(0 <= Offset && "Extension offset must be positive.");
assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
"Extension offset must be in the first lane or start an upper lane.");
// Check that an index is in same lane as the base offset.
auto SafeOffset = [&](int Idx) {
return OffsetLane == (Idx / NumEltsPerLane);
};
// Shift along an input so that the offset base moves to the first element.
auto ShuffleOffset = [&](SDValue V) {
if (!Offset)
return V;
SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
for (int i = 0; i * Scale < NumElements; ++i) {
int SrcIdx = i + Offset;
ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
}
return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
};
// Found a valid zext mask! Try various lowering strategies based on the
// input type and available ISA extensions.
if (Subtarget.hasSSE41()) {
// Not worth offsetting 128-bit vectors if scale == 2, a pattern using
// PUNPCK will catch this in a later shuffle match.
if (Offset && Scale == 2 && VT.is128BitVector())
return SDValue();
MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
NumElements / Scale);
InputV = ShuffleOffset(InputV);
InputV = getExtendInVec(X86ISD::VZEXT, DL, ExtVT, InputV, DAG);
return DAG.getBitcast(VT, InputV);
}
assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
// For any extends we can cheat for larger element sizes and use shuffle
// instructions that can fold with a load and/or copy.
if (AnyExt && EltBits == 32) {
int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
-1};
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
DAG.getBitcast(MVT::v4i32, InputV),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
if (AnyExt && EltBits == 16 && Scale > 2) {
int PSHUFDMask[4] = {Offset / 2, -1,
SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
DAG.getBitcast(MVT::v4i32, InputV),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
int PSHUFWMask[4] = {1, -1, -1, -1};
unsigned OddEvenOp = (Offset & 1 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW);
return DAG.getBitcast(
VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
DAG.getBitcast(MVT::v8i16, InputV),
getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
}
// The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
// to 64-bits.
if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
assert(VT.is128BitVector() && "Unexpected vector width!");
int LoIdx = Offset * EltBits;
SDValue Lo = DAG.getBitcast(
MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
DAG.getConstant(EltBits, DL, MVT::i8),
DAG.getConstant(LoIdx, DL, MVT::i8)));
if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
!SafeOffset(Offset + 1))
return DAG.getBitcast(VT, Lo);
int HiIdx = (Offset + 1) * EltBits;
SDValue Hi = DAG.getBitcast(
MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
DAG.getConstant(EltBits, DL, MVT::i8),
DAG.getConstant(HiIdx, DL, MVT::i8)));
return DAG.getBitcast(VT,
DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
}
// If this would require more than 2 unpack instructions to expand, use
// pshufb when available. We can only use more than 2 unpack instructions
// when zero extending i8 elements which also makes it easier to use pshufb.
if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
assert(NumElements == 16 && "Unexpected byte vector width!");
SDValue PSHUFBMask[16];
for (int i = 0; i < 16; ++i) {
int Idx = Offset + (i / Scale);
PSHUFBMask[i] = DAG.getConstant(
(i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
}
InputV = DAG.getBitcast(MVT::v16i8, InputV);
return DAG.getBitcast(
VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
}
// If we are extending from an offset, ensure we start on a boundary that
// we can unpack from.
int AlignToUnpack = Offset % (NumElements / Scale);
if (AlignToUnpack) {
SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
for (int i = AlignToUnpack; i < NumElements; ++i)
ShMask[i - AlignToUnpack] = i;
InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
Offset -= AlignToUnpack;
}
// Otherwise emit a sequence of unpacks.
do {
unsigned UnpackLoHi = X86ISD::UNPCKL;
if (Offset >= (NumElements / 2)) {
UnpackLoHi = X86ISD::UNPCKH;
Offset -= (NumElements / 2);
}
MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
: getZeroVector(InputVT, Subtarget, DAG, DL);
InputV = DAG.getBitcast(InputVT, InputV);
InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
Scale /= 2;
EltBits *= 2;
NumElements /= 2;
} while (Scale > 1);
return DAG.getBitcast(VT, InputV);
}
/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering, it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
int Bits = VT.getSizeInBits();
int NumLanes = Bits / 128;
int NumElements = VT.getVectorNumElements();
int NumEltsPerLane = NumElements / NumLanes;
assert(VT.getScalarSizeInBits() <= 32 &&
"Exceeds 32-bit integer zero extension limit");
assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
// Define a helper function to check a particular ext-scale and lower to it if
// valid.
auto Lower = [&](int Scale) -> SDValue {
SDValue InputV;
bool AnyExt = true;
int Offset = 0;
int Matches = 0;
for (int i = 0; i < NumElements; ++i) {
int M = Mask[i];
if (M < 0)
continue; // Valid anywhere but doesn't tell us anything.
if (i % Scale != 0) {
// Each of the extended elements need to be zeroable.
if (!Zeroable[i])
return SDValue();
// We no longer are in the anyext case.
AnyExt = false;
continue;
}
// Each of the base elements needs to be consecutive indices into the
// same input vector.
SDValue V = M < NumElements ? V1 : V2;
M = M % NumElements;
if (!InputV) {
InputV = V;
Offset = M - (i / Scale);
} else if (InputV != V)
return SDValue(); // Flip-flopping inputs.
// Offset must start in the lowest 128-bit lane or at the start of an
// upper lane.
// FIXME: Is it ever worth allowing a negative base offset?
if (!((0 <= Offset && Offset < NumEltsPerLane) ||
(Offset % NumEltsPerLane) == 0))
return SDValue();
// If we are offsetting, all referenced entries must come from the same
// lane.
if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
return SDValue();
if ((M % NumElements) != (Offset + (i / Scale)))
return SDValue(); // Non-consecutive strided elements.
Matches++;
}
// If we fail to find an input, we have a zero-shuffle which should always
// have already been handled.
// FIXME: Maybe handle this here in case during blending we end up with one?
if (!InputV)
return SDValue();
// If we are offsetting, don't extend if we only match a single input, we
// can always do better by using a basic PSHUF or PUNPCK.
if (Offset != 0 && Matches < 2)
return SDValue();
return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
DL, VT, Scale, Offset, AnyExt, InputV, Mask, Subtarget, DAG);
};
// The widest scale possible for extending is to a 64-bit integer.
assert(Bits % 64 == 0 &&
"The number of bits in a vector must be divisible by 64 on x86!");
int NumExtElements = Bits / 64;
// Each iteration, try extending the elements half as much, but into twice as
// many elements.
for (; NumExtElements < NumElements; NumExtElements *= 2) {
assert(NumElements % NumExtElements == 0 &&
"The input vector size must be divisible by the extended size.");
if (SDValue V = Lower(NumElements / NumExtElements))
return V;
}
// General extends failed, but 128-bit vectors may be able to use MOVQ.
if (Bits != 128)
return SDValue();
// Returns one of the source operands if the shuffle can be reduced to a
// MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
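// For example, the v4i32 mask <0, 1, zz, zz> copies V1's low 64 bits and
// zeroes the upper 64 bits, which is exactly MOVQ.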
auto CanZExtLowHalf = [&]() {
for (int i = NumElements / 2; i != NumElements; ++i)
if (!Zeroable[i])
return SDValue();
if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
return V1;
if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
return V2;
return SDValue();
};
if (SDValue V = CanZExtLowHalf()) {
V = DAG.getBitcast(MVT::v2i64, V);
V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
return DAG.getBitcast(VT, V);
}
// No viable ext lowering found.
return SDValue();
}
/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
SelectionDAG &DAG) {
MVT VT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
V = peekThroughBitcasts(V);
// If the bitcasts shift the element size, we can't extract an equivalent
// element from it.
MVT NewVT = V.getSimpleValueType();
if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
return SDValue();
if (V.getOpcode() == ISD::BUILD_VECTOR ||
(Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
// Ensure the scalar operand is the same size as the destination.
// FIXME: Add support for scalar truncation where possible.
SDValue S = V.getOperand(Idx);
if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
return DAG.getBitcast(EltVT, S);
}
return SDValue();
}
/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
V = peekThroughBitcasts(V);
return ISD::isNON_EXTLoad(V.getNode());
}
/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
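/// For example, the v4f32 shuffle <4, zz, zz, zz> inserts the low scalar of
/// V2 into a zero vector, lowering to a single VZEXT_MOVL (movss-style) node.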
static SDValue lowerVectorShuffleAsElementInsertion(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT ExtVT = VT;
MVT EltVT = VT.getVectorElementType();
int V2Index =
find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
Mask.begin();
bool IsV1Zeroable = true;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (i != V2Index && !Zeroable[i]) {
IsV1Zeroable = false;
break;
}
// Check for a single input from a SCALAR_TO_VECTOR node.
// FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
// all the smarts here sunk into that routine. However, the current
// lowering of BUILD_VECTOR makes that nearly impossible until the old
// vector shuffle lowering is dead.
SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
DAG);
if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
// We need to zext the scalar if it is smaller than an i32.
V2S = DAG.getBitcast(EltVT, V2S);
if (EltVT == MVT::i8 || EltVT == MVT::i16) {
// Using zext to expand a narrow element won't work for non-zero
// insertions.
if (!IsV1Zeroable)
return SDValue();
// Zero-extend directly to i32.
ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
}
V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
} else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
EltVT == MVT::i16) {
// Either not inserting from the low element of the input or the input
// element size is too small to use VZEXT_MOVL to clear the high bits.
return SDValue();
}
if (!IsV1Zeroable) {
// If V1 can't be treated as a zero vector we have fewer options to lower
// this. We can't support integer vectors or non-zero targets cheaply, and
// the V1 elements can't be permuted in any way.
assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
if (!VT.isFloatingPoint() || V2Index != 0)
return SDValue();
SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
V1Mask[V2Index] = -1;
if (!isNoopShuffleMask(V1Mask))
return SDValue();
if (!VT.is128BitVector())
return SDValue();
// Otherwise, use MOVSD or MOVSS.
assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
"Only two types of floating point element types to handle!");
return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
ExtVT, V1, V2);
}
// This lowering only works for the low element with floating point vectors.
if (VT.isFloatingPoint() && V2Index != 0)
return SDValue();
V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
if (ExtVT != VT)
V2 = DAG.getBitcast(VT, V2);
if (V2Index != 0) {
// If we have 4 or fewer lanes we can cheaply shuffle the element into
// the desired position. Otherwise it is more efficient to do a vector
// shift left. We know that we can do a vector shift left because all
// the inputs are zero.
if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
V2Shuffle[V2Index] = 0;
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
} else {
V2 = DAG.getBitcast(MVT::v16i8, V2);
V2 = DAG.getNode(
X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
V2 = DAG.getBitcast(VT, V2);
}
}
return V2;
}
/// Try to lower a broadcast of a single (truncated) integer element,
/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
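///
/// For example, broadcasting i8 element 5 of a build_vector with i32 elements
/// uses Scale = 4: take operand 1 (= 5 / 4), shift it right by 8 bits
/// (= (5 % 4) * 8), truncate to i8 and VBROADCAST the result.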
static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT,
SDValue V0, int BroadcastIdx,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX2() &&
"We can only lower integer broadcasts with AVX2!");
EVT EltVT = VT.getVectorElementType();
EVT V0VT = V0.getValueType();
assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
EVT V0EltVT = V0VT.getVectorElementType();
if (!V0EltVT.isInteger())
return SDValue();
const unsigned EltSize = EltVT.getSizeInBits();
const unsigned V0EltSize = V0EltVT.getSizeInBits();
// This is only a truncation if the original element type is larger.
if (V0EltSize <= EltSize)
return SDValue();
assert(((V0EltSize % EltSize) == 0) &&
"Scalar type sizes must all be powers of 2 on x86!");
const unsigned V0Opc = V0.getOpcode();
const unsigned Scale = V0EltSize / EltSize;
const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
V0Opc != ISD::BUILD_VECTOR)
return SDValue();
SDValue Scalar = V0.getOperand(V0BroadcastIdx);
// If we're extracting non-least-significant bits, shift so we can truncate.
// Hopefully, we can fold away the trunc/srl/load into the broadcast.
// Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
// vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
if (const int OffsetIdx = BroadcastIdx % Scale)
Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
}
/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
(Subtarget.hasAVX() && VT.isFloatingPoint()) ||
(Subtarget.hasAVX2() && VT.isInteger())))
return SDValue();
// With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
// we can only broadcast from a register with AVX2.
unsigned NumElts = Mask.size();
unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
? X86ISD::MOVDDUP
: X86ISD::VBROADCAST;
bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
// Check that the mask is a broadcast.
int BroadcastIdx = -1;
for (int i = 0; i != (int)NumElts; ++i) {
SmallVector<int, 8> BroadcastMask(NumElts, i);
if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
BroadcastIdx = i;
break;
}
}
if (BroadcastIdx < 0)
return SDValue();
assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
"a sorted mask where the broadcast "
"comes from V1.");
// Go up the chain of (vector) values to find a scalar load that we can
// combine with the broadcast.
SDValue V = V1;
for (;;) {
switch (V.getOpcode()) {
case ISD::BITCAST: {
// Peek through bitcasts as long as BroadcastIdx can be adjusted.
SDValue VSrc = V.getOperand(0);
unsigned NumEltBits = V.getScalarValueSizeInBits();
unsigned NumSrcBits = VSrc.getScalarValueSizeInBits();
if ((NumEltBits % NumSrcBits) == 0)
BroadcastIdx *= (NumEltBits / NumSrcBits);
else if ((NumSrcBits % NumEltBits) == 0 &&
(BroadcastIdx % (NumSrcBits / NumEltBits)) == 0)
BroadcastIdx /= (NumSrcBits / NumEltBits);
else
break;
V = VSrc;
continue;
}
case ISD::CONCAT_VECTORS: {
int OperandSize = Mask.size() / V.getNumOperands();
V = V.getOperand(BroadcastIdx / OperandSize);
BroadcastIdx %= OperandSize;
continue;
}
case ISD::INSERT_SUBVECTOR: {
SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
if (!ConstantIdx)
break;
int BeginIdx = (int)ConstantIdx->getZExtValue();
int EndIdx =
BeginIdx + (int)VInner.getSimpleValueType().getVectorNumElements();
if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
BroadcastIdx -= BeginIdx;
V = VInner;
} else {
V = VOuter;
}
continue;
}
}
break;
}
// Ensure the source vector and BroadcastIdx are for a suitable type.
if (VT.getScalarSizeInBits() != V.getScalarValueSizeInBits()) {
unsigned NumEltBits = VT.getScalarSizeInBits();
unsigned NumSrcBits = V.getScalarValueSizeInBits();
if ((NumSrcBits % NumEltBits) == 0)
BroadcastIdx *= (NumSrcBits / NumEltBits);
else if ((NumEltBits % NumSrcBits) == 0 &&
(BroadcastIdx % (NumEltBits / NumSrcBits)) == 0)
BroadcastIdx /= (NumEltBits / NumSrcBits);
else
return SDValue();
unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
MVT SrcVT = MVT::getVectorVT(VT.getScalarType(), NumSrcElts);
V = DAG.getBitcast(SrcVT, V);
}
// Check if this is a broadcast of a scalar. We special case lowering
// for scalars so that we can more effectively fold with loads.
// First, look through bitcast: if the original value has a larger element
// type than the shuffle, the broadcast element is in essence truncated.
// Make that explicit to ease folding.
if (V.getOpcode() == ISD::BITCAST && VT.isInteger())
if (SDValue TruncBroadcast = lowerVectorShuffleAsTruncBroadcast(
DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG))
return TruncBroadcast;
MVT BroadcastVT = VT;
// Peek through any bitcast (only useful for loads).
SDValue BC = peekThroughBitcasts(V);
// Also check the simpler case, where we can directly reuse the scalar.
if (V.getOpcode() == ISD::BUILD_VECTOR ||
(V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
V = V.getOperand(BroadcastIdx);
// If we can't broadcast from a register, check that the input is a load.
if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
return SDValue();
} else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
// 32-bit targets need to load i64 as a f64 and then bitcast the result.
if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
? X86ISD::MOVDDUP
: Opcode;
}
// If we are broadcasting a load that is only used by the shuffle
// then we can reduce the vector load to the broadcasted scalar load.
LoadSDNode *Ld = cast<LoadSDNode>(BC);
SDValue BaseAddr = Ld->getOperand(1);
EVT SVT = BroadcastVT.getScalarType();
unsigned Offset = BroadcastIdx * SVT.getStoreSize();
SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
DAG.getMachineFunction().getMachineMemOperand(
Ld->getMemOperand(), Offset, SVT.getStoreSize()));
DAG.makeEquivalentMemoryOrdering(Ld, V);
} else if (!BroadcastFromReg) {
// We can't broadcast from a vector register.
return SDValue();
} else if (BroadcastIdx != 0) {
// We can only broadcast from the zero-element of a vector register,
// but it can be advantageous to broadcast from the zero-element of a
// subvector.
if (!VT.is256BitVector() && !VT.is512BitVector())
return SDValue();
// VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
if (VT == MVT::v4f64 || VT == MVT::v4i64)
return SDValue();
// Only broadcast the zero-element of a 128-bit subvector.
unsigned EltSize = VT.getScalarSizeInBits();
if (((BroadcastIdx * EltSize) % 128) != 0)
return SDValue();
// The shuffle input might have been a bitcast we looked through; look at
// the original input vector. Emit an EXTRACT_SUBVECTOR of that type; we'll
// later bitcast it to BroadcastVT.
assert(V.getScalarValueSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
"Unexpected vector element size");
assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
"Unexpected vector size");
V = extract128BitVector(V, BroadcastIdx, DAG, DL);
}
if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
DAG.getBitcast(MVT::f64, V));
// Bitcast back to the same scalar type as BroadcastVT.
MVT SrcVT = V.getSimpleValueType();
if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
"Unexpected vector element size");
if (SrcVT.isVector()) {
unsigned NumSrcElts = SrcVT.getVectorNumElements();
SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
} else {
SrcVT = BroadcastVT.getScalarType();
}
V = DAG.getBitcast(SrcVT, V);
}
// 32-bit targets need to load i64 as a f64 and then bitcast the result.
if (!Subtarget.is64Bit() && SrcVT == MVT::i64) {
V = DAG.getBitcast(MVT::f64, V);
unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
}
// We only support broadcasting from 128-bit vectors to minimize the
// number of patterns we need to deal with in isel. So extract down to
// 128-bits, removing as many bitcasts as possible.
if (SrcVT.getSizeInBits() > 128) {
MVT ExtVT = MVT::getVectorVT(SrcVT.getScalarType(),
128 / SrcVT.getScalarSizeInBits());
V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
V = DAG.getBitcast(ExtVT, V);
}
return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}
// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static bool matchVectorShuffleAsInsertPS(SDValue &V1, SDValue &V2,
unsigned &InsertPSMask,
const APInt &Zeroable,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
// Attempt to match INSERTPS with one element from VA or VB being
// inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
// are updated.
auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
ArrayRef<int> CandidateMask) {
unsigned ZMask = 0;
int VADstIndex = -1;
int VBDstIndex = -1;
bool VAUsedInPlace = false;
for (int i = 0; i < 4; ++i) {
// Synthesize a zero mask from the zeroable elements (includes undefs).
if (Zeroable[i]) {
ZMask |= 1 << i;
continue;
}
// Flag if we use any VA inputs in place.
if (i == CandidateMask[i]) {
VAUsedInPlace = true;
continue;
}
// We can only insert a single non-zeroable element.
if (VADstIndex >= 0 || VBDstIndex >= 0)
return false;
if (CandidateMask[i] < 4) {
// VA input out of place for insertion.
VADstIndex = i;
} else {
// VB input for insertion.
VBDstIndex = i;
}
}
// Don't bother if we have no (non-zeroable) element for insertion.
if (VADstIndex < 0 && VBDstIndex < 0)
return false;
// Determine element insertion src/dst indices. The src index is from the
// start of the inserted vector, not the start of the concatenated vector.
unsigned VBSrcIndex = 0;
if (VADstIndex >= 0) {
// If we have a VA input out of place, we use VA as the V2 element
// insertion and don't use the original V2 at all.
VBSrcIndex = CandidateMask[VADstIndex];
VBDstIndex = VADstIndex;
VB = VA;
} else {
VBSrcIndex = CandidateMask[VBDstIndex] - 4;
}
// If no V1 inputs are used in place, then the result is created only from
    // the zero mask and the V2 insertion, so remove the V1 dependency.
if (!VAUsedInPlace)
VA = DAG.getUNDEF(MVT::v4f32);
// Update V1, V2 and InsertPSMask accordingly.
V1 = VA;
V2 = VB;
// Insert the V2 element into the desired position.
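    // The immediate encodes the source element in bits [7:6], the destination
    // element in bits [5:4] and the zero mask in bits [3:0]. For example,
    // inserting VB element 2 into lane 1 while zeroing lane 0 gives
    // (2 << 6) | (1 << 4) | 0x1 = 0x91.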
InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
return true;
};
if (matchAsInsertPS(V1, V2, Mask))
return true;
// Commute and try again.
SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
ShuffleVectorSDNode::commuteMask(CommutedMask);
if (matchAsInsertPS(V2, V1, CommutedMask))
return true;
return false;
}
static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
// Attempt to match the insertps pattern.
unsigned InsertPSMask;
if (!matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
return SDValue();
// Insert the V2 element into the desired position.
return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up alternating between the
/// two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
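///
/// For example, the v4i32 mask [1, 5, 0, 4] permutes V1 and V2 each by
/// [1, 0, -1, -1] and then a single UNPCKL interleaves the results into
/// [V1[1], V2[1], V1[0], V2[0]].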
static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(!VT.isFloatingPoint() &&
"This routine only supports integer vectors.");
assert(VT.is128BitVector() &&
"This routine only works on 128-bit vectors.");
assert(!V2.isUndef() &&
"This routine should only be used when blending two inputs.");
assert(Mask.size() >= 2 && "Single element masks are invalid.");
int Size = Mask.size();
int NumLoInputs =
count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
int NumHiInputs =
count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
bool UnpackLo = NumLoInputs >= NumHiInputs;
auto TryUnpack = [&](int ScalarSize, int Scale) {
SmallVector<int, 16> V1Mask((unsigned)Size, -1);
SmallVector<int, 16> V2Mask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
// Each element of the unpack contains Scale elements from this mask.
int UnpackIdx = i / Scale;
// We only handle the case where V1 feeds the first slots of the unpack.
// We rely on canonicalization to ensure this is the case.
if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
return SDValue();
// Setup the mask for this input. The indexing is tricky as we have to
// handle the unpack stride.
SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
Mask[i] % Size;
}
// If we will have to shuffle both inputs to use the unpack, check whether
// we can just unpack first and shuffle the result. If so, skip this unpack.
if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
!isNoopShuffleMask(V2Mask))
return SDValue();
// Shuffle the inputs into place.
V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
// Cast the inputs to the type we will use to unpack them.
MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
V1 = DAG.getBitcast(UnpackVT, V1);
V2 = DAG.getBitcast(UnpackVT, V2);
// Unpack the inputs and cast the result back to the desired type.
return DAG.getBitcast(
VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
UnpackVT, V1, V2));
};
  // We try each unpack from the largest to the smallest to find one that fits
  // this mask.
int OrigScalarSize = VT.getScalarSizeInBits();
for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
return Unpack;
// If none of the unpack-rooted lowerings worked (or were profitable) try an
// initial unpack.
if (NumLoInputs == 0 || NumHiInputs == 0) {
assert((NumLoInputs > 0 || NumHiInputs > 0) &&
"We have to have *some* inputs!");
int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
// FIXME: We could consider the total complexity of the permute of each
// possible unpacking. Or at the least we should consider how many
// half-crossings are created.
// FIXME: We could consider commuting the unpacks.
SmallVector<int, 32> PermMask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
PermMask[i] =
2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
}
return DAG.getVectorShuffle(
VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
DL, VT, V1, V2),
DAG.getUNDEF(VT), PermMask);
}
return SDValue();
}
/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
if (V2.isUndef()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
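    // E.g. Mask = [1, 1] gives SHUFPDMask = 0b11, duplicating the high
    // element into both lanes.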
if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPD which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
return DAG.getNode(
X86ISD::SHUFP, DL, MVT::v2f64,
Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[0] < 2 && "We sort V1 to be the first input.");
assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
// When loading a scalar and then shuffling it into a vector we can often do
// the insertion cheaply.
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Try inverting the insertion since for v2 masks it is easy to do and we
// can't reliably sort the mask one way or the other.
int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
return Insertion;
// Try to use one of the special instruction patterns to handle two common
// blend patterns if a zero-blend above didn't work.
if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
isShuffleEquivalent(V1, V2, Mask, {1, 3}))
if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
// We can either use a special instruction to load over the low double or
// to move just the low double.
return DAG.getNode(
X86ISD::MOVSD, DL, MVT::v2f64, V2,
DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
return V;
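  // The immediate selects Mask[0] from V1 and Mask[1] (rebased onto V2's
  // elements) from V2; e.g. Mask = [0, 3] gives SHUFPDMask = 0b10 and the
  // result [V1[0], V2[1]].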
unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}
/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
if (V2.isUndef()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
// onward this has a single fast instruction with no scary immediates.
// We have to map the mask as it is actually a v4i32 shuffle instruction.
V1 = DAG.getBitcast(MVT::v4i32, V1);
int WidenedMask[4] = {
std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
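    // E.g. the v2i64 mask [1, 0] widens to the v4i32 mask [2, 3, 0, 1],
    // swapping the two 64-bit halves with a single PSHUFD.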
return DAG.getBitcast(
MVT::v2i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
}
assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
assert(Mask[0] < 2 && "We sort V1 to be the first input.");
assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// When loading a scalar and then shuffling it into a vector we can often do
// the insertion cheaply.
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Try inverting the insertion since for v2 masks it is easy to do and we
// can't reliably sort the mask one way or the other.
int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
return Insertion;
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
return V;
// Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
if (Subtarget.hasSSSE3()) {
if (Subtarget.hasVLX())
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v2i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
return Rotate;
}
// If we have direct support for blends, we should lower by decomposing into
// a permute. That will be faster than the domain cross.
if (IsBlendSupported)
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
Mask, DAG);
// We implement this with SHUFPD which is pretty lame because it will likely
// incur 2 cycles of stall for integer vectors on Nehalem and older chips.
// However, all the alternatives are still more cycles and newer chips don't
// have this problem. It would be really nice if x86 had better shuffles here.
V1 = DAG.getBitcast(MVT::v2f64, V1);
V2 = DAG.getBitcast(MVT::v2f64, V2);
return DAG.getBitcast(MVT::v2i64,
DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
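///
/// For example, [0, 1, 4, 5] qualifies (the low half reads only V1 and the
/// high half only V2), while [0, 4, 1, 5] does not because its low half needs
/// both inputs.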
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
// This routine only handles 128-bit shufps.
assert(Mask.size() == 4 && "Unsupported mask size!");
assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
// To lower with a single SHUFPS we need to have the low half and high half
// each requiring a single input.
if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
return false;
if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
return false;
return true;
}
/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
SDValue LowV = V1, HighV = V2;
int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 1) {
int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
// Compute the index adjacent to V2Index and in the same half by toggling
// the low bit.
int V2AdjIndex = V2Index ^ 1;
if (Mask[V2AdjIndex] < 0) {
// Handles all the cases where we have a single V2 element and an undef.
// This will only ever happen in the high lanes because we commute the
// vector otherwise.
if (V2Index < 2)
std::swap(LowV, HighV);
NewMask[V2Index] -= 4;
} else {
// Handle the case where the V2 element ends up adjacent to a V1 element.
// To make this work, blend them together as the first step.
int V1Index = V2AdjIndex;
int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
// Now proceed to reconstruct the final blend as we have the necessary
// high or low half formed.
if (V2Index < 2) {
LowV = V2;
HighV = V1;
} else {
HighV = V2;
}
NewMask[V1Index] = 2; // We put the V1 element in V2[2].
NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
}
} else if (NumV2Elements == 2) {
if (Mask[0] < 4 && Mask[1] < 4) {
// Handle the easy case where we have V1 in the low lanes and V2 in the
// high lanes.
NewMask[2] -= 4;
NewMask[3] -= 4;
} else if (Mask[2] < 4 && Mask[3] < 4) {
// We also handle the reversed case because this utility may get called
// when we detect a SHUFPS pattern but can't easily commute the shuffle to
// arrange things in the right direction.
NewMask[0] -= 4;
NewMask[1] -= 4;
HighV = V1;
LowV = V2;
} else {
// We have a mixture of V1 and V2 in both low and high lanes. Rather than
// trying to place elements directly, just blend them and set up the final
// shuffle to place them.
// The first two blend mask elements are for V1, the second two are for
// V2.
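      // E.g. for Mask = [0, 5, 2, 7] the blend produces
      // [V1[0], V1[2], V2[1], V2[3]], and NewMask becomes [0, 2, 1, 3] to
      // rearrange that into the requested order.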
int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
Mask[2] < 4 ? Mask[2] : Mask[3],
(Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
(Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
// Now we do a normal shuffle of V1 by giving V1 as both operands to
// a blend.
LowV = HighV = V1;
NewMask[0] = Mask[0] < 4 ? 0 : 2;
NewMask[1] = Mask[0] < 4 ? 2 : 0;
NewMask[2] = Mask[2] < 4 ? 1 : 3;
NewMask[3] = Mask[2] < 4 ? 3 : 1;
}
}
return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}
/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v4f32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use even/odd duplicate instructions for masks that match their pattern.
if (Subtarget.hasSSE3()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
}
if (Subtarget.hasAVX()) {
// If we have AVX, we can use VPERMILPS which will allow folding a load
// into the shuffle.
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
// in SSE1 because otherwise they are widened to v2f64 and never get here.
if (!Subtarget.hasSSE2()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
}
// Otherwise, use a straight shuffle of a single input vector. We pass the
// input vector to both operands to simulate this with a SHUFPS.
return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
  // There are special ways we can lower some single-element blends. However,
  // we defer to the more complex single-element blend lowerings below if both
  // this and BLENDPS fail to match, so restrict this to the case where the V2
  // input targets element 0 of the mask -- that is the fast case here.
if (NumV2Elements == 1 && Mask[0] >= 4)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
if (Subtarget.hasSSE41()) {
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use INSERTPS if we can complete the shuffle efficiently.
if (SDValue V =
lowerVectorShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
return V;
if (!isSingleSHUFPSMask(Mask))
if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
DL, MVT::v4f32, V1, V2, Mask, DAG))
return BlendPerm;
}
// Use low/high mov instructions. These are only valid in SSE1 because
// otherwise they are widened to v2f64 and never get here.
if (!Subtarget.hasSSE2()) {
if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
}
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
return V;
// Otherwise fall back to a SHUFPS lowering strategy.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Straight shuffle of a single input vector. For everything from SSE2
// onward this has a single fast instruction with no scary immediates.
// We coerce the shuffle pattern to be compatible with UNPCK instructions
// but we aren't actually going to use the UNPCK instruction because doing
// so prevents folding a load into this instruction or making a copy.
const int UnpackLoMask[] = {0, 0, 1, 1};
const int UnpackHiMask[] = {2, 2, 3, 3};
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
Mask = UnpackLoMask;
else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
Mask = UnpackHiMask;
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// There are special ways we can lower some single-element blends.
if (NumV2Elements == 1)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
Zeroable, DAG))
return Masked;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
return V;
// Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
if (Subtarget.hasSSSE3()) {
if (Subtarget.hasVLX())
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v4i32, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
}
// Assume that a single SHUFPS is faster than an alternative sequence of
// multiple instructions (even if the CPU has a domain penalty).
// If some CPU is harmed by the domain switch, we can fix it in a later pass.
if (!isSingleSHUFPSMask(Mask)) {
// If we have direct support for blends, we should lower by decomposing into
// a permute. That will be faster than the domain cross.
if (IsBlendSupported)
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
Mask, DAG);
// Try to lower by permuting the inputs into an unpack instruction.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
DL, MVT::v4i32, V1, V2, Mask, DAG))
return Unpack;
}
// We implement this with SHUFPS because it can blend from two vectors.
// Because we're going to eventually use SHUFPS, we use SHUFPS even to build
// up the inputs, bypassing domain shift penalties that we would incur if we
// directly used PSHUFD on Nehalem and older. For newer chips, this isn't
// relevant.
SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
return DAG.getBitcast(MVT::v4i32, ShufPS);
}
/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
///
/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
MutableArrayRef<int> LoMask = Mask.slice(0, 4);
MutableArrayRef<int> HiMask = Mask.slice(4, 4);
// Attempt to directly match PSHUFLW or PSHUFHW.
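  // E.g. the mask [1, 0, 3, 2, 4, 5, 6, 7] leaves the high words in place and
  // matches a single PSHUFLW with the immediate mask [1, 0, 3, 2].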
if (isUndefOrInRange(LoMask, 0, 4) &&
isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
}
if (isUndefOrInRange(HiMask, 4, 8) &&
isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
for (int i = 0; i != 4; ++i)
HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
}
SmallVector<int, 4> LoInputs;
copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
array_pod_sort(LoInputs.begin(), LoInputs.end());
LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
SmallVector<int, 4> HiInputs;
copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
array_pod_sort(HiInputs.begin(), HiInputs.end());
HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
int NumLToL =
std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
int NumHToL = LoInputs.size() - NumLToL;
int NumLToH =
std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
int NumHToH = HiInputs.size() - NumLToH;
MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
// If we are shuffling values from one half - check how many different DWORD
// pairs we need to create. If only 1 or 2 then we can perform this as a
// PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
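  // E.g. the mask [2, 3, 2, 3, 0, 1, 0, 1] uses only two DWORD pairs, so it
  // lowers here as PSHUFLW [2, 3, 0, 1] followed by PSHUFD [0, 0, 1, 1].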
auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
V = DAG.getNode(ShufWOp, DL, VT, V,
getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
V = DAG.getBitcast(PSHUFDVT, V);
V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
return DAG.getBitcast(VT, V);
};
if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
int PSHUFDMask[4] = { -1, -1, -1, -1 };
SmallVector<std::pair<int, int>, 4> DWordPairs;
int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
// Collect the different DWORD pairs.
for (int DWord = 0; DWord != 4; ++DWord) {
int M0 = Mask[2 * DWord + 0];
int M1 = Mask[2 * DWord + 1];
M0 = (M0 >= 0 ? M0 % 4 : M0);
M1 = (M1 >= 0 ? M1 % 4 : M1);
if (M0 < 0 && M1 < 0)
continue;
bool Match = false;
for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
auto &DWordPair = DWordPairs[j];
if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
(M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
PSHUFDMask[DWord] = DOffset + j;
Match = true;
break;
}
}
if (!Match) {
PSHUFDMask[DWord] = DOffset + DWordPairs.size();
DWordPairs.push_back(std::make_pair(M0, M1));
}
}
if (DWordPairs.size() <= 2) {
DWordPairs.resize(2, std::make_pair(-1, -1));
int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
DWordPairs[1].first, DWordPairs[1].second};
if ((NumHToL + NumHToH) == 0)
return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
if ((NumLToL + NumLToH) == 0)
return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
}
}
  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <= 2 inputs from each half going to each half. Once there, we can
  // fall through to the generic code below. For example:
//
// Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
// Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
//
// However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
// and an existing 2-into-2 on the other half. In this case we may have to
// pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
// 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the half
  // other than the one we are fixing) will be fixed when we re-enter this
  // path. Any sequence of PSHUFD instructions that results will also be
  // combined into a single instruction. Here is an example of the tricky case:
//
// Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
// Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
//
// This now has a 1-into-3 in the high half! Instead, we do two shuffles:
//
// Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
// Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
//
// Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
// Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
//
// The result is fine to be handled by the generic logic.
auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
int AOffset, int BOffset) {
assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
"Must call this with A having 3 or 1 inputs from the A half.");
assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
"Must call this with B having 1 or 3 inputs from the B half.");
assert(AToAInputs.size() + BToAInputs.size() == 4 &&
"Must call this with either 3:1 or 1:3 inputs (summing to 4).");
bool ThreeAInputs = AToAInputs.size() == 3;
// Compute the index of dword with only one word among the three inputs in
// a half by taking the sum of the half with three inputs and subtracting
// the sum of the actual three inputs. The difference is the remaining
// slot.
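    // E.g. with AToAInputs = {0, 1, 3} and AOffset = 0, TripleInputSum is 6
    // and the inputs sum to 4, so the remaining slot is word 2 and the
    // triple's dword is 1.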
int ADWord, BDWord;
int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
int TripleNonInputIdx =
TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
TripleDWord = TripleNonInputIdx / 2;
// We use xor with one to compute the adjacent DWord to whichever one the
// OneInput is in.
OneInputDWord = (OneInput / 2) ^ 1;
// Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
// and BToA inputs. If there is also such a problem with the BToB and AToB
// inputs, we don't try to fix it necessarily -- we'll recurse and see it in
// the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
// is essential that we don't *create* a 3<-1 as then we might oscillate.
if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
int NumFlippedAToBInputs =
std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
int NumFlippedBToBInputs =
std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
if ((NumFlippedAToBInputs == 1 &&
(NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
(NumFlippedBToBInputs == 1 &&
(NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
// We choose whether to fix the A half or B half based on whether that
// half has zero flipped inputs. At zero, we may not be able to fix it
// with that half. We also bias towards fixing the B half because that
// will more commonly be the high half, and we have to bias one way.
auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
ArrayRef<int> Inputs) {
int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
// Determine whether the free index is in the flipped dword or the
// unflipped dword based on where the pinned index is. We use this bit
// in an xor to conditionally select the adjacent dword.
int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
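          // E.g. DWord = 1 with PinnedIdx = 2 (also in dword 1) gives
          // FixFreeIdx = 2 * (1 ^ 1) = 0, the first word of the other dword.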
bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
if (IsFixIdxInput == IsFixFreeIdxInput)
FixFreeIdx += 1;
IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
assert(IsFixIdxInput != IsFixFreeIdxInput &&
"We need to be changing the number of flipped inputs!");
int PSHUFHalfMask[] = {0, 1, 2, 3};
std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
V = DAG.getNode(
FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
for (int &M : Mask)
if (M >= 0 && M == FixIdx)
M = FixFreeIdx;
else if (M >= 0 && M == FixFreeIdx)
M = FixIdx;
};
if (NumFlippedBToBInputs != 0) {
int BPinnedIdx =
BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
} else {
assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
}
}
}
int PSHUFDMask[] = {0, 1, 2, 3};
PSHUFDMask[ADWord] = BDWord;
PSHUFDMask[BDWord] = ADWord;
V = DAG.getBitcast(
VT,
DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
// Adjust the mask to match the new locations of A and B.
for (int &M : Mask)
if (M >= 0 && M/2 == ADWord)
M = 2 * BDWord + M % 2;
else if (M >= 0 && M/2 == BDWord)
M = 2 * ADWord + M % 2;
// Recurse back into this routine to re-compute state now that this isn't
// a 3 and 1 problem.
return lowerV8I16GeneralSingleInputVectorShuffle(DL, VT, V, Mask, Subtarget,
DAG);
};
if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
// At this point there are at most two inputs to the low and high halves from
// each half. That means the inputs can always be grouped into dwords and
// those dwords can then be moved to the correct half with a dword shuffle.
// We use at most one low and one high word shuffle to collect these paired
// inputs into dwords, and finally a dword shuffle to place them.
int PSHUFLMask[4] = {-1, -1, -1, -1};
int PSHUFHMask[4] = {-1, -1, -1, -1};
int PSHUFDMask[4] = {-1, -1, -1, -1};
// First fix the masks for all the inputs that are staying in their
// original halves. This will then dictate the targets of the cross-half
// shuffles.
auto fixInPlaceInputs =
[&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
MutableArrayRef<int> SourceHalfMask,
MutableArrayRef<int> HalfMask, int HalfOffset) {
if (InPlaceInputs.empty())
return;
if (InPlaceInputs.size() == 1) {
SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
InPlaceInputs[0] - HalfOffset;
PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
return;
}
if (IncomingInputs.empty()) {
// Just fix all of the in place inputs.
for (int Input : InPlaceInputs) {
SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
PSHUFDMask[Input / 2] = Input / 2;
}
return;
}
assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
InPlaceInputs[0] - HalfOffset;
// Put the second input next to the first so that they are packed into
// a dword. We find the adjacent index by toggling the low bit.
int AdjIndex = InPlaceInputs[0] ^ 1;
SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
};
fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
// Now gather the cross-half inputs and place them into a free dword of
// their target half.
// FIXME: This operation could almost certainly be simplified dramatically to
// look more like the 3-1 fixing operation.
auto moveInputsToRightHalf = [&PSHUFDMask](
MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
int DestOffset) {
auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
};
auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
int Word) {
int LowWord = Word & ~1;
int HighWord = Word | 1;
return isWordClobbered(SourceHalfMask, LowWord) ||
isWordClobbered(SourceHalfMask, HighWord);
};
if (IncomingInputs.empty())
return;
if (ExistingInputs.empty()) {
// Map any dwords with inputs from them into the right half.
for (int Input : IncomingInputs) {
// If the source half mask maps over the inputs, turn those into
// swaps and use the swapped lane.
if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
Input - SourceOffset;
// We have to swap the uses in our half mask in one sweep.
for (int &M : HalfMask)
if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
M = Input;
else if (M == Input)
M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
} else {
assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
Input - SourceOffset &&
"Previous placement doesn't match!");
}
// Note that this correctly re-maps both when we do a swap and when
// we observe the other side of the swap above. We rely on that to
// avoid swapping the members of the input list directly.
Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
}
// Map the input's dword into the correct half.
if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
else
assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
Input / 2 &&
"Previous placement doesn't match!");
}
// And just directly shift any other-half mask elements to be same-half
// as we will have mirrored the dword containing the element into the
// same position within that half.
for (int &M : HalfMask)
if (M >= SourceOffset && M < SourceOffset + 4) {
M = M - SourceOffset + DestOffset;
assert(M >= 0 && "This should never wrap below zero!");
}
return;
}
// Ensure we have the input in a viable dword of its current half. This
// is particularly tricky because the original position may be clobbered
// by inputs being moved and *staying* in that half.
if (IncomingInputs.size() == 1) {
if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
SourceOffset;
SourceHalfMask[InputFixed - SourceOffset] =
IncomingInputs[0] - SourceOffset;
std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
InputFixed);
IncomingInputs[0] = InputFixed;
}
} else if (IncomingInputs.size() == 2) {
if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
// We have two non-adjacent or clobbered inputs we need to extract from
// the source half. To do this, we need to map them into some adjacent
// dword slot in the source mask.
int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
IncomingInputs[1] - SourceOffset};
// If there is a free slot in the source half mask adjacent to one of
// the inputs, place the other input in it. We use (Index XOR 1) to
// compute an adjacent index.
if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
InputsFixed[1] = InputsFixed[0] ^ 1;
} else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
InputsFixed[0] = InputsFixed[1] ^ 1;
} else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
// The two inputs are in the same DWord but it is clobbered and the
// adjacent DWord isn't used at all. Move both inputs to the free
// slot.
SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
            // InputsFixed[0] now names the low word of the freed adjacent
            // DWord, so the second input is simply the next word.
            InputsFixed[1] = InputsFixed[0] + 1;
} else {
// The only way we hit this point is if there is no clobbering
// (because there are no off-half inputs to this half) and there is no
// free slot adjacent to one of the inputs. In this case, we have to
// swap an input with a non-input.
for (int i = 0; i < 4; ++i)
assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
"We can't handle any clobbers here!");
assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
"Cannot have adjacent inputs here!");
SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
// We also have to update the final source mask in this case because
// it may need to undo the above swap.
for (int &M : FinalSourceHalfMask)
if (M == (InputsFixed[0] ^ 1) + SourceOffset)
M = InputsFixed[1] + SourceOffset;
else if (M == InputsFixed[1] + SourceOffset)
M = (InputsFixed[0] ^ 1) + SourceOffset;
InputsFixed[1] = InputsFixed[0] ^ 1;
}
// Point everything at the fixed inputs.
for (int &M : HalfMask)
if (M == IncomingInputs[0])
M = InputsFixed[0] + SourceOffset;
else if (M == IncomingInputs[1])
M = InputsFixed[1] + SourceOffset;
IncomingInputs[0] = InputsFixed[0] + SourceOffset;
IncomingInputs[1] = InputsFixed[1] + SourceOffset;
}
} else {
llvm_unreachable("Unhandled input size!");
}
// Now hoist the DWord down to the right half.
int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
for (int &M : HalfMask)
for (int Input : IncomingInputs)
if (M == Input)
M = FreeDWord * 2 + Input % 2;
};
moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
/*SourceOffset*/ 4, /*DestOffset*/ 0);
moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
/*SourceOffset*/ 0, /*DestOffset*/ 4);
// Now enact all the shuffles we've computed to move the inputs into their
// target half.
if (!isNoopShuffleMask(PSHUFLMask))
V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
if (!isNoopShuffleMask(PSHUFHMask))
V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
if (!isNoopShuffleMask(PSHUFDMask))
V = DAG.getBitcast(
VT,
DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
// At this point, each half should contain all its inputs, and we can then
// just shuffle them into their final position.
assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
"Failed to lift all the high half inputs to the low mask!");
assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
"Failed to lift all the low half inputs to the high mask!");
// Do a half shuffle for the low mask.
if (!isNoopShuffleMask(LoMask))
V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
// Do a half shuffle with the high mask after shifting its values down.
for (int &M : HiMask)
if (M >= 0)
M -= 4;
if (!isNoopShuffleMask(HiMask))
V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
return V;
}
/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
/// blend if only one input is used.
static SDValue lowerVectorShuffleAsBlendOfPSHUFBs(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse,
bool &V2InUse) {
SDValue V1Mask[16];
SDValue V2Mask[16];
V1InUse = false;
V2InUse = false;
int Size = Mask.size();
int Scale = 16 / Size;
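  // For a v8i16 shuffle, Size is 8 and Scale is 2: mask element M expands to
  // the byte selectors 2 * M and 2 * M + 1 in the PSHUFB control vector.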
for (int i = 0; i < 16; ++i) {
if (Mask[i / Scale] < 0) {
V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
} else {
const int ZeroMask = 0x80;
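      // PSHUFB zeroes an output byte when bit 7 of its control byte is set,
      // so 0x80 acts as the "select zero" index.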
int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
: ZeroMask;
int V2Idx = Mask[i / Scale] < Size
? ZeroMask
: (Mask[i / Scale] - Size) * Scale + i % Scale;
if (Zeroable[i / Scale])
V1Idx = V2Idx = ZeroMask;
V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
V1InUse |= (ZeroMask != V1Idx);
V2InUse |= (ZeroMask != V2Idx);
}
}
if (V1InUse)
V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V1),
DAG.getBuildVector(MVT::v16i8, DL, V1Mask));
if (V2InUse)
V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
DAG.getBitcast(MVT::v16i8, V2),
DAG.getBuildVector(MVT::v16i8, DL, V2Mask));
// If we need shuffled inputs from both, blend the two.
SDValue V;
if (V1InUse && V2InUse)
V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
else
V = V1InUse ? V1 : V2;
// Cast the result back to the correct type.
return DAG.getBitcast(VT, V);
}
/// Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
if (NumV2Inputs == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2,
DAG, Subtarget))
return V;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i16, V1, V1,
Mask, Subtarget, DAG))
return Rotate;
// Make a copy of the mask so it can be modified.
SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1,
MutableMask, Subtarget,
DAG);
}
assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
"All single-input shuffles should be canonicalized to be V1-input "
"shuffles.");
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// See if we can use SSE4A Extraction / Insertion.
if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, DAG))
return V;
// There are special ways we can lower some single-element blends.
if (NumV2Inputs == 1)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
// We have different paths for blend lowering, but they all must use the
// *exact* same predicate.
bool IsBlendSupported = Subtarget.hasSSE41();
if (IsBlendSupported)
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, DAG))
return Masked;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
if (SDValue BitBlend =
lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
return BitBlend;
// Try to lower by permuting the inputs into an unpack instruction.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1,
V2, Mask, DAG))
return Unpack;
// If we can't directly blend but can use PSHUFB, that will be better as it
// can both shuffle and set up the inefficient blend.
if (!IsBlendSupported && Subtarget.hasSSSE3()) {
bool V1InUse, V2InUse;
return lowerVectorShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
Zeroable, DAG, V1InUse, V2InUse);
}
// We can always bit-blend if we have to so the fallback strategy is to
// decompose into single-input permutes and blends.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
Mask, DAG);
}
/// Check whether a compaction lowering can be done by dropping even
/// elements and compute how many times even elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
/// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
/// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
/// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
/// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
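///
/// For example (illustrative), the first N == 2 mask above can be lowered
/// with two rounds of PACKUS, one per drop, as done in the v16i8 lowering
/// below.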
static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
bool IsSingleInput) {
// The modulus for the shuffle vector entries is based on whether this is
// a single input or not.
int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
"We should only be called with masks with a power-of-2 size!");
uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
// We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
// and 2^3 simultaneously. This is because we may have ambiguity with
// partially undef inputs.
bool ViableForN[3] = {true, true, true};
for (int i = 0, e = Mask.size(); i < e; ++i) {
// Ignore undef lanes, we'll optimistically collapse them to the pattern we
// want.
if (Mask[i] < 0)
continue;
bool IsAnyViable = false;
for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
if (ViableForN[j]) {
uint64_t N = j + 1;
// The shuffle mask must be equal to (i * 2^N) % M.
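// For example, with a 16-element single-input mask (M == 16) and N == 1,
// element i == 9 must equal (9 << 1) & 15 == 2.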
if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
IsAnyViable = true;
else
ViableForN[j] = false;
}
// Early exit if we exhaust the possible powers of two.
if (!IsAnyViable)
break;
}
for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
if (ViableForN[j])
return j + 1;
// Return 0 as there is no viable power of two.
return 0;
}
static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
}
/// Generic lowering of v16i8 shuffles.
///
/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
/// detect any complexity reducing interleaving. If that doesn't help, it uses
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use a zext lowering.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// See if we can use SSE4A Extraction / Insertion.
if (Subtarget.hasSSE4A())
if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
Zeroable, DAG))
return V;
int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
// For single-input shuffles, there are some nicer lowering tricks we can use.
if (NumV2Elements == 0) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Check whether we can widen this to an i16 shuffle by duplicating bytes.
// Notably, this handles splat and partial-splat shuffles more efficiently.
// However, it only makes sense if the pre-duplication shuffle simplifies
// things significantly. Currently, this means we need to be able to
// express the pre-duplication shuffle as an i16 shuffle.
//
// FIXME: We should check for other patterns which can be widened into an
// i16 shuffle as well.
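// For example (illustrative), the v16i8 mask
// <3,3, 0,0, 1,1, 2,2, 7,7, 4,4, 5,5, 6,6> duplicates each source byte into
// an adjacent pair and so passes the canWidenViaDuplication check below.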
auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
for (int i = 0; i < 16; i += 2)
if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
return false;
return true;
};
auto tryToWidenViaDuplication = [&]() -> SDValue {
if (!canWidenViaDuplication(Mask))
return SDValue();
SmallVector<int, 4> LoInputs;
copy_if(Mask, std::back_inserter(LoInputs),
[](int M) { return M >= 0 && M < 8; });
array_pod_sort(LoInputs.begin(), LoInputs.end());
LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
LoInputs.end());
SmallVector<int, 4> HiInputs;
copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
array_pod_sort(HiInputs.begin(), HiInputs.end());
HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
HiInputs.end());
bool TargetLo = LoInputs.size() >= HiInputs.size();
ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
SmallDenseMap<int, int, 8> LaneMap;
for (int I : InPlaceInputs) {
PreDupI16Shuffle[I/2] = I/2;
LaneMap[I] = I;
}
int j = TargetLo ? 0 : 4, je = j + 4;
for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
// Check if j is already a shuffle of this input. This happens when
// there are two adjacent bytes after we move the low one.
if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
// If we haven't yet mapped the input, search for a slot into which
// we can map it.
while (j < je && PreDupI16Shuffle[j] >= 0)
++j;
if (j == je)
// We can't place the inputs into a single half with a simple
// i16 shuffle, so bail.
return SDValue();
// Map this input with the i16 shuffle.
PreDupI16Shuffle[j] = MovingInputs[i] / 2;
}
// Update the lane map based on the mapping we ended up with.
LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
}
V1 = DAG.getBitcast(
MVT::v16i8,
DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
// Unpack the bytes to form the i16s that will be shuffled into place.
V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
MVT::v16i8, V1, V1);
int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
for (int i = 0; i < 16; ++i)
if (Mask[i] >= 0) {
int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
if (PostDupI16Shuffle[i / 2] < 0)
PostDupI16Shuffle[i / 2] = MappedMask;
else
assert(PostDupI16Shuffle[i / 2] == MappedMask &&
"Conflicting entries in the original shuffle!");
}
return DAG.getBitcast(
MVT::v16i8,
DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
};
if (SDValue V = tryToWidenViaDuplication())
return V;
}
if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
Zeroable, DAG))
return Masked;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
return V;
// Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
// with PSHUFB. It is important to do this before we attempt to generate any
// blends but after all of the single-input lowerings. If the single input
// lowerings can find an instruction sequence that is faster than a PSHUFB, we
// want to preserve that and we can DAG combine any longer sequences into
// a PSHUFB in the end. But once we start blending from multiple inputs,
// the complexity of DAG combining bad patterns back into PSHUFB is too high,
// and there are *very* few patterns that would actually be faster than the
// PSHUFB approach because of its ability to zero lanes.
//
// FIXME: The only exceptions to the above are blends which are exact
// interleavings with direct instructions supporting them. We currently don't
// handle those well here.
if (Subtarget.hasSSSE3()) {
bool V1InUse = false;
bool V2InUse = false;
SDValue PSHUFB = lowerVectorShuffleAsBlendOfPSHUFBs(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
// If both V1 and V2 are in use and we can use a direct blend or an unpack,
// do so. This avoids using them to handle blends-with-zero which is
// important as a single pshufb is significantly faster for that.
if (V1InUse && V2InUse) {
if (Subtarget.hasSSE41())
if (SDValue Blend = lowerVectorShuffleAsBlend(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Blend;
// We can use an unpack to do the blending rather than an or in some
// cases. Even though the or may be (very marginally) more efficient, we
// prefer this lowering because there are common cases where part of
// the complexity of the shuffles goes away when we do the final blend as
// an unpack.
// FIXME: It might be worth trying to detect if the unpack-feeding
// shuffles will both be pshufb, in which case we shouldn't bother with
// this.
if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
DL, MVT::v16i8, V1, V2, Mask, DAG))
return Unpack;
// If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
if (Subtarget.hasVBMI() && Subtarget.hasVLX())
return lowerVectorShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
}
return PSHUFB;
}
// There are special ways we can lower some single-element blends.
if (NumV2Elements == 1)
if (SDValue V = lowerVectorShuffleAsElementInsertion(
DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return V;
if (SDValue BitBlend =
lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
return BitBlend;
// Check whether a compaction lowering can be done. This handles shuffles
// which take every Nth element for some even N. See the helper function for
// details.
//
// We special case these as they can be particularly efficiently handled with
// the PACKUSWB instruction on x86 and they show up in common patterns of
// rearranging bytes to truncate wide elements.
bool IsSingleInput = V2.isUndef();
if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
// NumEvenDrops is the power of two stride of the elements. Another way of
// thinking about it is that we need to drop the even elements this many
// times to get the original input.
// First we need to zero all the dropped bytes.
assert(NumEvenDrops <= 3 &&
"No support for dropping even elements more than 3 times.");
// We use the mask type to pick which bytes are preserved based on how many
// elements are dropped.
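// Worked example (illustrative): for a v16i8 mask taking every 4th byte,
// NumEvenDrops == 2. The v4i32 0xFF splat clears all but byte 0 of each
// dword, and each PACKUS round then drops the (now zero) odd bytes, so two
// rounds compact bytes 0, 4, 8, 12, ... into consecutive lanes.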
MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
SDValue ByteClearMask = DAG.getBitcast(
MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1]));
V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
if (!IsSingleInput)
V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
// Now pack things back together.
V1 = DAG.getBitcast(MVT::v8i16, V1);
V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
for (int i = 1; i < NumEvenDrops; ++i) {
Result = DAG.getBitcast(MVT::v8i16, Result);
Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
}
return Result;
}
// Handle multi-input cases by blending single-input shuffles.
if (NumV2Elements > 0)
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
Mask, DAG);
// The fallback path for single-input shuffles widens this into two v8i16
// vectors with unpacks, shuffles those, and then pulls them back together
// with a pack.
SDValue V = V1;
std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
for (int i = 0; i < 16; ++i)
if (Mask[i] >= 0)
(i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
SDValue VLoHalf, VHiHalf;
// Check if any of the odd lanes in the v16i8 are used. If not, we can mask
// them out and avoid using UNPCK{L,H} to extract the elements of V as
// i16s.
if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
// Use a mask to drop the high bytes.
VLoHalf = DAG.getBitcast(MVT::v8i16, V);
VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
DAG.getConstant(0x00FF, DL, MVT::v8i16));
// This will be a single vector shuffle instead of a blend so nuke VHiHalf.
VHiHalf = DAG.getUNDEF(MVT::v8i16);
// Squash the masks to point directly into VLoHalf.
for (int &M : LoBlendMask)
if (M >= 0)
M /= 2;
for (int &M : HiBlendMask)
if (M >= 0)
M /= 2;
} else {
// Otherwise just unpack the low half of V into VLoHalf and the high half
// into VHiHalf so that we can blend them as i16s.
SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
VLoHalf = DAG.getBitcast(
MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
VHiHalf = DAG.getBitcast(
MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
}
SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
/// Dispatching routine to lower various 128-bit x86 vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
switch (VT.SimpleTy) {
case MVT::v2i64:
return lowerV2I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v2f64:
return lowerV2F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v4i32:
return lowerV4I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v4f32:
return lowerV4F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8i16:
return lowerV8I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16i8:
return lowerV16I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Unimplemented!");
}
}
/// Generic routine to split vector shuffle into half-sized shuffles.
///
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(VT.getSizeInBits() >= 256 &&
"Only for 256-bit or wider vector shuffles!");
assert(V1.getSimpleValueType() == VT && "Bad operand type!");
assert(V2.getSimpleValueType() == VT && "Bad operand type!");
ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
int NumElements = VT.getVectorNumElements();
int SplitNumElements = NumElements / 2;
MVT ScalarVT = VT.getVectorElementType();
MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
// Rather than splitting build-vectors, just build two narrower build
// vectors. This helps shuffling with splats and zeros.
auto SplitVector = [&](SDValue V) {
V = peekThroughBitcasts(V);
MVT OrigVT = V.getSimpleValueType();
int OrigNumElements = OrigVT.getVectorNumElements();
int OrigSplitNumElements = OrigNumElements / 2;
MVT OrigScalarVT = OrigVT.getVectorElementType();
MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
SDValue LoV, HiV;
auto *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV) {
LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
DAG.getIntPtrConstant(0, DL));
HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
DAG.getIntPtrConstant(OrigSplitNumElements, DL));
} else {
SmallVector<SDValue, 16> LoOps, HiOps;
for (int i = 0; i < OrigSplitNumElements; ++i) {
LoOps.push_back(BV->getOperand(i));
HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
}
LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
}
return std::make_pair(DAG.getBitcast(SplitVT, LoV),
DAG.getBitcast(SplitVT, HiV));
};
SDValue LoV1, HiV1, LoV2, HiV2;
std::tie(LoV1, HiV1) = SplitVector(V1);
std::tie(LoV2, HiV2) = SplitVector(V2);
// Now create two 4-way blends of these half-width vectors.
auto HalfBlend = [&](ArrayRef<int> HalfMask) {
bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
for (int i = 0; i < SplitNumElements; ++i) {
int M = HalfMask[i];
if (M >= NumElements) {
if (M >= NumElements + SplitNumElements)
UseHiV2 = true;
else
UseLoV2 = true;
V2BlendMask[i] = M - NumElements;
BlendMask[i] = SplitNumElements + i;
} else if (M >= 0) {
if (M >= SplitNumElements)
UseHiV1 = true;
else
UseLoV1 = true;
V1BlendMask[i] = M;
BlendMask[i] = i;
}
}
// Because the lowering happens after all combining takes place, we need to
// manually combine these blend masks as much as possible so that we create
// a minimal number of high-level vector shuffle nodes.
// First try just blending the halves of V1 or V2.
if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
return DAG.getUNDEF(SplitVT);
if (!UseLoV2 && !UseHiV2)
return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
if (!UseLoV1 && !UseHiV1)
return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
SDValue V1Blend, V2Blend;
if (UseLoV1 && UseHiV1) {
V1Blend =
DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
} else {
// We only use half of V1 so map the usage down into the final blend mask.
V1Blend = UseLoV1 ? LoV1 : HiV1;
for (int i = 0; i < SplitNumElements; ++i)
if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
}
if (UseLoV2 && UseHiV2) {
V2Blend =
DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
} else {
// We only use half of V2 so map the usage down into the final blend mask.
V2Blend = UseLoV2 ? LoV2 : HiV2;
for (int i = 0; i < SplitNumElements; ++i)
if (BlendMask[i] >= SplitNumElements)
BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
}
return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
};
SDValue Lo = HalfBlend(LoMask);
SDValue Hi = HalfBlend(HiMask);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
/// Either split a vector in halves or decompose the shuffles and the
/// blend.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(!V2.isUndef() && "This routine must not be used to lower single-input "
"shuffles as it could then recurse on itself.");
int Size = Mask.size();
// If this can be modeled as a broadcast of two elements followed by a blend,
// prefer that lowering. This is especially important because broadcasts can
// often fold with memory operands.
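// For example, a v4f64 mask {0, 6, 0, 6} broadcasts element 0 of V1 and
// element 2 of V2, so we lower it as two broadcasts followed by a blend.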
auto DoBothBroadcast = [&] {
int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
for (int M : Mask)
if (M >= Size) {
if (V2BroadcastIdx < 0)
V2BroadcastIdx = M - Size;
else if (M - Size != V2BroadcastIdx)
return false;
} else if (M >= 0) {
if (V1BroadcastIdx < 0)
V1BroadcastIdx = M;
else if (M != V1BroadcastIdx)
return false;
}
return true;
};
if (DoBothBroadcast())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
DAG);
// If the inputs all stem from a single 128-bit lane of each input, then we
// split them rather than blending because the split will decompose to
// unusually few instructions.
int LaneCount = VT.getSizeInBits() / 128;
int LaneSize = Size / LaneCount;
SmallBitVector LaneInputs[2];
LaneInputs[0].resize(LaneCount, false);
LaneInputs[1].resize(LaneCount, false);
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
// Otherwise, just fall back to decomposed shuffles and a blend. This requires
// that the decomposed single-input shuffles don't end up here.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
/// Lower a vector shuffle crossing multiple 128-bit lanes as
/// a permutation and blend of those lanes.
///
/// This essentially blends the out-of-lane inputs to each lane into the lane
/// from a permuted copy of the vector. This lowering strategy results in four
/// instructions in the worst case for a single-input cross lane shuffle which
/// is lower than any other fully general cross-lane shuffle strategy I'm aware
/// of. Special cases for each particular shuffle pattern should be handled
/// prior to trying this lowering.
static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// FIXME: This should probably be generalized for 512-bit vectors as well.
assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
int Size = Mask.size();
int LaneSize = Size / 2;
// If there are only inputs from one 128-bit lane, splitting will in fact be
// less expensive. The flags track whether the given lane contains an element
// that crosses to another lane.
if (!Subtarget.hasAVX2()) {
bool LaneCrossing[2] = {false, false};
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
if (!LaneCrossing[0] || !LaneCrossing[1])
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
} else {
bool LaneUsed[2] = {false, false};
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
LaneUsed[(Mask[i] / LaneSize)] = true;
if (!LaneUsed[0] || !LaneUsed[1])
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}
assert(V2.isUndef() &&
"This last part of this routine only works on single input shuffles");
SmallVector<int, 32> FlippedBlendMask(Size);
for (int i = 0; i < Size; ++i)
FlippedBlendMask[i] =
Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
? Mask[i]
: Mask[i] % LaneSize +
(i / LaneSize) * LaneSize + Size);
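// For example, for v4f64 with Mask == {2, 1, 0, 3} the in-lane elements 1
// and 3 are kept, while the cross-lane elements are redirected into the
// flipped copy, giving FlippedBlendMask == {4, 1, 6, 3}.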
// Flip the vector, and blend the results which should now be in-lane.
MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
SDValue Flipped = DAG.getBitcast(PVT, V1);
Flipped = DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT),
{ 2, 3, 0, 1 });
Flipped = DAG.getBitcast(VT, Flipped);
return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
}
/// Handle lowering 2-lane 128-bit shuffles.
static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
SDValue V2, ArrayRef<int> Mask,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
if (Subtarget.hasAVX2() && V2.isUndef())
return SDValue();
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
return SDValue();
bool IsLowZero = (Zeroable & 0x3) == 0x3;
bool IsHighZero = (Zeroable & 0xc) == 0xc;
// Try to use an insert into a zero vector.
if (WidenedMask[0] == 0 && IsHighZero) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
getZeroVector(VT, Subtarget, DAG, DL), LoV,
DAG.getIntPtrConstant(0, DL));
}
// TODO: If minimizing size and one of the inputs is a zero vector and the
// zero vector has only one use, we could use a VPERM2X128 to save the
// instruction bytes needed to explicitly generate the zero vector.
// Blends are faster and handle all the non-lane-crossing cases.
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// If either input operand is a zero vector, use VPERM2X128 because its mask
// allows us to replace the zero input with an implicit zero.
if (!IsLowZero && !IsHighZero) {
// Check for patterns which can be matched with a single insert of a 128-bit
// subvector.
bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
// With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
// this will likely become vinsertf128 which can't fold a 256-bit memop.
if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
OnlyUsesV1 ? V1 : V2,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
DAG.getIntPtrConstant(2, DL));
}
}
// Try to use SHUF128 if possible.
if (Subtarget.hasVLX()) {
if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
((WidenedMask[1] % 2) << 1);
return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
DAG.getConstant(PermMask, DL, MVT::i8));
}
}
}
// Otherwise form a 128-bit permutation. After accounting for undefs,
// convert the 64-bit shuffle mask selection values into 128-bit
// selection bits by dividing the indexes by 2 and shifting into positions
// defined by a vperm2*128 instruction's immediate control byte.
// The immediate permute control byte looks like this:
// [1:0] - select 128 bits from sources for low half of destination
// [2] - ignore
// [3] - zero low half of destination
// [5:4] - select 128 bits from sources for high half of destination
// [6] - ignore
// [7] - zero high half of destination
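// For example, WidenedMask == {1, 2} (high half of V1 into the low half,
// low half of V2 into the high half) yields PermMask == 0x21.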
assert((WidenedMask[0] >= 0 || IsLowZero) &&
(WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
unsigned PermMask = 0;
PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
// Check the immediate mask and replace unused sources with undef.
if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
V1 = DAG.getUNDEF(VT);
if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
V2 = DAG.getUNDEF(VT);
return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
DAG.getConstant(PermMask, DL, MVT::i8));
}
/// Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
/// This will only succeed when the result of fixing the 128-bit lanes results
/// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
/// each 128-bit lane. This handles many cases where we can quickly blend away
/// the lane crosses early and then use simpler shuffles within each lane.
///
/// FIXME: It might be worthwhile at some point to support this without
/// requiring the 128-bit lane-relative shuffles to be repeating, but currently
/// on x86 only floating point has interesting non-repeating shuffles, and even
/// those are still *marginally* more expensive.
static SDValue lowerVectorShuffleByMerging128BitLanes(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
assert(!V2.isUndef() && "This is only useful with multiple inputs.");
int Size = Mask.size();
int LaneSize = 128 / VT.getScalarSizeInBits();
int NumLanes = Size / LaneSize;
assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
// See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
// check whether the in-128-bit lane shuffles share a repeating pattern.
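// For example (illustrative), a v8i32 mask {0,1,2,3, 12,13,14,15} fixes the
// lanes with a v4i64 lane shuffle {0,1, 6,7} (low lane of V1, high lane of
// V2), after which the remaining in-lane shuffle is the identity.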
SmallVector<int, 4> Lanes((unsigned)NumLanes, -1);
SmallVector<int, 4> InLaneMask((unsigned)LaneSize, -1);
for (int i = 0; i < Size; ++i) {
if (Mask[i] < 0)
continue;
int j = i / LaneSize;
if (Lanes[j] < 0) {
// First entry we've seen for this lane.
Lanes[j] = Mask[i] / LaneSize;
} else if (Lanes[j] != Mask[i] / LaneSize) {
// This doesn't match the lane selected previously!
return SDValue();
}
// Check that within each lane we have a consistent shuffle mask.
int k = i % LaneSize;
if (InLaneMask[k] < 0) {
InLaneMask[k] = Mask[i] % LaneSize;
} else if (InLaneMask[k] != Mask[i] % LaneSize) {
// This doesn't fit a repeating in-lane mask.
return SDValue();
}
}
// First shuffle the lanes into place.
MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
VT.getSizeInBits() / 64);
SmallVector<int, 8> LaneMask((unsigned)NumLanes * 2, -1);
for (int i = 0; i < NumLanes; ++i)
if (Lanes[i] >= 0) {
LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
}
V1 = DAG.getBitcast(LaneVT, V1);
V2 = DAG.getBitcast(LaneVT, V2);
SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
// Cast it back to the type we actually want.
LaneShuffle = DAG.getBitcast(VT, LaneShuffle);
// Now do a simple shuffle that isn't lane crossing.
SmallVector<int, 8> NewMask((unsigned)Size, -1);
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0)
NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
"Must not introduce lane crosses at this point!");
return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
}
/// Lower shuffles where an entire half of a 256- or 512-bit vector is UNDEF.
/// This allows for fast cases such as subvector extraction/insertion
/// or shuffling smaller vector types which can lower more efficiently.
static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT,
SDValue V1, SDValue V2,
ArrayRef<int> Mask,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert((VT.is256BitVector() || VT.is512BitVector()) &&
"Expected 256-bit or 512-bit vector");
unsigned NumElts = VT.getVectorNumElements();
unsigned HalfNumElts = NumElts / 2;
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts);
bool UndefLower = isUndefInRange(Mask, 0, HalfNumElts);
bool UndefUpper = isUndefInRange(Mask, HalfNumElts, HalfNumElts);
if (!UndefLower && !UndefUpper)
return SDValue();
// Upper half is undef and lower half is whole upper subvector.
// e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
if (UndefUpper &&
isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
DAG.getIntPtrConstant(HalfNumElts, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
DAG.getIntPtrConstant(0, DL));
}
// Lower half is undef and upper half is whole lower subvector.
// e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
if (UndefLower &&
isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
DAG.getIntPtrConstant(HalfNumElts, DL));
}
// If the shuffle only uses two of the four halves of the input operands,
// then extract them and perform the 'half' shuffle at half width.
// e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u>
int HalfIdx1 = -1, HalfIdx2 = -1;
SmallVector<int, 8> HalfMask(HalfNumElts);
unsigned Offset = UndefLower ? HalfNumElts : 0;
for (unsigned i = 0; i != HalfNumElts; ++i) {
int M = Mask[i + Offset];
if (M < 0) {
HalfMask[i] = M;
continue;
}
// Determine which of the 4 half vectors this element is from.
// i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
int HalfIdx = M / HalfNumElts;
// Determine the element index into its half vector source.
int HalfElt = M % HalfNumElts;
// We can shuffle with up to 2 half vectors, set the new 'half'
// shuffle mask accordingly.
if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
HalfMask[i] = HalfElt;
HalfIdx1 = HalfIdx;
continue;
}
if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
HalfMask[i] = HalfElt + HalfNumElts;
HalfIdx2 = HalfIdx;
continue;
}
// Too many half vectors referenced.
return SDValue();
}
assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
// Only shuffle the halves of the inputs when useful.
int NumLowerHalves =
(HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
int NumUpperHalves =
(HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
// uuuuXXXX - don't extract uppers just to insert again.
if (UndefLower && NumUpperHalves != 0)
return SDValue();
// XXXXuuuu - don't extract both uppers, instead shuffle and then extract.
if (UndefUpper && NumUpperHalves == 2)
return SDValue();
// AVX2 - XXXXuuuu - always extract lowers.
if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) {
// AVX2 supports efficient immediate 64-bit element cross-lane shuffles.
if (VT == MVT::v4f64 || VT == MVT::v4i64)
return SDValue();
// AVX2 supports variable 32-bit element cross-lane shuffles.
if (VT == MVT::v8f32 || VT == MVT::v8i32) {
// XXXXuuuu - don't extract lowers and uppers.
if (UndefUpper && NumLowerHalves != 0 && NumUpperHalves != 0)
return SDValue();
}
}
// AVX512 - XXXXuuuu - always extract lowers.
if (VT.is512BitVector() && !(UndefUpper && NumUpperHalves == 0))
return SDValue();
auto GetHalfVector = [&](int HalfIdx) {
if (HalfIdx < 0)
return DAG.getUNDEF(HalfVT);
SDValue V = (HalfIdx < 2 ? V1 : V2);
HalfIdx = (HalfIdx % 2) * HalfNumElts;
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
DAG.getIntPtrConstant(HalfIdx, DL));
};
SDValue Half1 = GetHalfVector(HalfIdx1);
SDValue Half2 = GetHalfVector(HalfIdx2);
SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
DAG.getIntPtrConstant(Offset, DL));
}
/// Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in the
/// slot required by the given mask and require no permutation.
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
int Size = Mask.size();
for (int i = 0; i < Size; ++i)
if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
return false;
return true;
}
/// Handle case where shuffle sources are coming from the same 128-bit lane and
/// every lane can be represented as the same repeating mask - allowing us to
/// shuffle the sources with the repeating shuffle and then permute the result
/// to the destination lanes.
static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
const X86Subtarget &Subtarget, SelectionDAG &DAG) {
int NumElts = VT.getVectorNumElements();
int NumLanes = VT.getSizeInBits() / 128;
int NumLaneElts = NumElts / NumLanes;
// On AVX2 we may be able to just shuffle the lowest elements and then
// broadcast the result.
if (Subtarget.hasAVX2()) {
for (unsigned BroadcastSize : {16, 32, 64}) {
if (BroadcastSize <= VT.getScalarSizeInBits())
continue;
int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
// Attempt to match a repeating pattern every NumBroadcastElts, accounting
// for UNDEFs, that only references the lowest 128-bit lane of the inputs.
auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
for (int i = 0; i != NumElts; i += NumBroadcastElts)
for (int j = 0; j != NumBroadcastElts; ++j) {
int M = Mask[i + j];
if (M < 0)
continue;
int &R = RepeatMask[j];
if (0 != ((M % NumElts) / NumLaneElts))
return false;
if (0 <= R && R != M)
return false;
R = M;
}
return true;
};
SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
if (!FindRepeatingBroadcastMask(RepeatMask))
continue;
// Shuffle the (lowest) repeated elements in place for broadcast.
SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
// Shuffle the actual broadcast.
SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
for (int i = 0; i != NumElts; i += NumBroadcastElts)
for (int j = 0; j != NumBroadcastElts; ++j)
BroadcastMask[i + j] = j;
return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
BroadcastMask);
}
}
// Bail if the shuffle mask doesn't cross 128-bit lanes.
if (!is128BitLaneCrossingShuffleMask(VT, Mask))
return SDValue();
// Bail if we already have a repeated lane shuffle mask.
SmallVector<int, 8> RepeatedShuffleMask;
if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
return SDValue();
// On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
// (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
int NumSubLanes = NumLanes * SubLaneScale;
int NumSubLaneElts = NumLaneElts / SubLaneScale;
// Check that all the sources are coming from the same lane and see if we can
// form a repeating shuffle mask (local to each sub-lane). At the same time,
// determine the source sub-lane for each destination sub-lane.
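// For example (AVX1, so SubLaneScale == 1), a v8f32 mask {6,7,4,5, 2,3,0,1}
// repeats the sub-lane mask {2,3,0,1} with Dst2SrcSubLanes == {1, 0}: we can
// shuffle in-lane as {2,3,0,1, 6,7,4,5} and then swap the two 128-bit lanes.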
int TopSrcSubLane = -1;
SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
// Extract the sub-lane mask, check that it all comes from the same lane
// and normalize the mask entries to come from the first lane.
int SrcLane = -1;
SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
if (M < 0)
continue;
int Lane = (M % NumElts) / NumLaneElts;
if ((0 <= SrcLane) && (SrcLane != Lane))
return SDValue();
SrcLane = Lane;
int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
SubLaneMask[Elt] = LocalM;
}
// Whole sub-lane is UNDEF.
if (SrcLane < 0)
continue;
// Attempt to match against the candidate repeated sub-lane masks.
for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
for (int i = 0; i != NumSubLaneElts; ++i) {
if (M1[i] < 0 || M2[i] < 0)
continue;
if (M1[i] != M2[i])
return false;
}
return true;
};
auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
continue;
// Merge the sub-lane mask into the matching repeated sub-lane mask.
for (int i = 0; i != NumSubLaneElts; ++i) {
int M = SubLaneMask[i];
if (M < 0)
continue;
assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
"Unexpected mask element");
RepeatedSubLaneMask[i] = M;
}
// Track the top most source sub-lane - by setting the remaining to UNDEF
// we can greatly simplify shuffle matching.
int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
break;
}
// Bail if we failed to find a matching repeated sub-lane mask.
if (Dst2SrcSubLanes[DstSubLane] < 0)
return SDValue();
}
assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
"Unexpected source lane");
// Create a repeating shuffle mask for the entire vector.
SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
int Lane = SubLane / SubLaneScale;
auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
int M = RepeatedSubLaneMask[Elt];
if (M < 0)
continue;
int Idx = (SubLane * NumSubLaneElts) + Elt;
RepeatedMask[Idx] = M + (Lane * NumLaneElts);
}
}
SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
// Shuffle each source sub-lane to its destination.
SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
for (int i = 0; i != NumElts; i += NumSubLaneElts) {
int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
if (SrcSubLane < 0)
continue;
for (int j = 0; j != NumSubLaneElts; ++j)
SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
}
return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
SubLaneMask);
}
static bool matchVectorShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
unsigned &ShuffleImm,
ArrayRef<int> Mask) {
int NumElts = VT.getVectorNumElements();
assert(VT.getScalarSizeInBits() == 64 &&
(NumElts == 2 || NumElts == 4 || NumElts == 8) &&
"Unexpected data type for VSHUFPD");
// Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ...
// Mask for V4F64: 0/1, 4/5, 2/3, 6/7, ...
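// For example, the v4f64 mask {1, 5, 2, 7} matches directly with
// ShuffleImm == 0b1011, each bit i selecting the odd or even element of the
// candidate pair for lane i.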
ShuffleImm = 0;
bool ShufpdMask = true;
bool CommutableMask = true;
for (int i = 0; i < NumElts; ++i) {
if (Mask[i] == SM_SentinelUndef)
continue;
if (Mask[i] < 0)
return false;
int Val = (i & 6) + NumElts * (i & 1);
int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
if (Mask[i] < Val || Mask[i] > Val + 1)
ShufpdMask = false;
if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
CommutableMask = false;
ShuffleImm |= (Mask[i] % 2) << i;
}
if (ShufpdMask)
return true;
if (CommutableMask) {
std::swap(V1, V2);
return true;
}
return false;
}
static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
"Unexpected data type for VSHUFPD");
unsigned Immediate = 0;
if (!matchVectorShuffleWithSHUFPD(VT, V1, V2, Immediate, Mask))
return SDValue();
return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
DAG.getConstant(Immediate, DL, MVT::i8));
}
/// Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return V;
if (V2.isUndef()) {
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Use low duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
// Non-half-crossing single input shuffles can be lowered with an
// interleaved permutation.
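// For example, Mask == {1, 0, 3, 2} yields VPERMILPMask == 0b0101.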
unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
DAG.getConstant(VPERMILPMask, DL, MVT::i8));
}
// With AVX2 we have direct support for this permutation.
if (Subtarget.hasAVX2())
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return V;
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
DAG, Subtarget);
}
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check if the blend happens to exactly fit that of SHUFPD.
if (SDValue Op =
lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG))
return Op;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return V;
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input even across lanes in a single
// instruction, so skip this pattern.
if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
isShuffleMaskInputInPlace(1, Mask))))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
return Result;
// If we have VLX support, we can use VEXPAND.
if (Subtarget.hasVLX())
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
// If we have AVX2 then we always want to lower with a blend because at v4 we
// can fully permute the elements.
if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
Mask, DAG);
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
}
/// Handle lowering of 4-lane 64-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
if (V2.isUndef()) {
// When the shuffle is mirrored between the 128-bit lanes of the unit, we
// can use lower latency instructions that will operate on both lanes.
SmallVector<int, 2> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
SmallVector<int, 4> PSHUFDMask;
scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
return DAG.getBitcast(
MVT::v4i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
DAG.getBitcast(MVT::v8i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
// AVX2 provides a direct instruction for permuting a single input across
// lanes.
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// If we have VLX support, we can use VALIGN or VEXPAND.
if (Subtarget.hasVLX()) {
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
}
// Try to use PALIGNR.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v4i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
return V;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
return V;
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle. However, if we have AVX2 and either input is already in place,
// we will be able to shuffle the other input even across lanes in a single
// instruction, so skip this pattern.
if (!isShuffleMaskInputInPlace(0, Mask) &&
!isShuffleMaskInputInPlace(1, Mask))
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
return Result;
// Otherwise fall back on generic blend lowering.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
Mask, DAG);
}
/// Handle lowering of 8-lane 32-bit floating point shuffles.
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
// If the shuffle mask is repeated in each 128-bit lane, we have many more
// options to efficiently lower the shuffle.
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
assert(RepeatedMask.size() == 4 &&
"Repeated masks must be half the mask width!");
// Use even/odd duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
return V;
// Otherwise, fall back to a SHUFPS sequence. Here it is important that we
// have already handled any direct blends.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
}
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
return V;
// If we have a single input shuffle with different shuffle patterns in the
// two 128-bit lanes use the variable mask to VPERMILPS.
if (V2.isUndef()) {
SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
if (Subtarget.hasAVX2())
return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
// Otherwise, fall back.
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
DAG, Subtarget);
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
return Result;
// If we have VLX support, we can use VEXPAND.
if (Subtarget.hasVLX())
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
// For non-AVX512, if the mask matches a 16-bit element shuffle within each
// lane, try to split, since after the split we get more efficient code using
// vpunpcklwd and vpunpckhwd than with vblend.
if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
if (SDValue V = lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2,
Mask, DAG))
return V;
// If we have AVX2 then we always want to lower with a blend because at v8 we
// can fully permute the elements.
if (Subtarget.hasAVX2())
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
Mask, DAG);
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
}
/// Handle lowering of 8-lane 32-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// For non-AVX512, if the mask matches a 16-bit element shuffle within each
// lane, try to split, since after the split we get more efficient code than
// with vblend by using vpunpcklwd and vpunpckhwd.
if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
!Subtarget.hasAVX512())
if (SDValue V =
lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
// If the shuffle mask is repeated in each 128-bit lane we can use more
// efficient instructions that mirror the shuffles across the two 128-bit
// lanes.
SmallVector<int, 4> RepeatedMask;
bool Is128BitLaneRepeatedShuffle =
is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
if (Is128BitLaneRepeatedShuffle) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
if (V2.isUndef())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
return V;
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// If we have VLX support, we can use VALIGN or EXPAND.
if (Subtarget.hasVLX()) {
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i32, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
}
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return V;
// If the shuffle patterns aren't repeated but it is a single input, directly
// generate a cross-lane VPERMD instruction.
if (V2.isUndef()) {
SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
}
// Assume that a single SHUFPS is faster than an alternative sequence of
// multiple instructions (even if the CPU has a domain penalty).
// If some CPU is harmed by the domain switch, we can fix it in a later pass.
if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
CastV1, CastV2, DAG);
return DAG.getBitcast(MVT::v8i32, ShufPS);
}
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
return Result;
// Otherwise fall back on generic blend lowering.
return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
Mask, DAG);
}
/// Handle lowering of 16-lane 16-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return V;
if (V2.isUndef()) {
// There are no generalized cross-lane shuffle operations available on i16
// element types.
if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
Mask, DAG, Subtarget);
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
// As this is a single-input shuffle, the repeated mask should be
// a strictly valid v8i16 mask that we can pass through to the v8i16
// lowering to handle even the v16 case.
return lowerV8I16GeneralSingleInputVectorShuffle(
DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
}
}
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v16i16, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
// AVX512BWVL can lower to VPERMW.
if (Subtarget.hasBWI() && Subtarget.hasVLX())
return lowerVectorShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
return Result;
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
}
/// Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Check for being able to broadcast a single element.
if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1, V2,
Mask, Subtarget, DAG))
return Broadcast;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
return V;
// Use dedicated pack instructions for masks that match their pattern.
if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
Subtarget))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return V;
// There are no generalized cross-lane shuffle operations available on i8
// element types.
if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask,
DAG, Subtarget);
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v32i8, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
// AVX512VBMIVL can lower to VPERMB.
if (Subtarget.hasVBMI() && Subtarget.hasVLX())
return lowerVectorShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
// Try to simplify this by merging 128-bit lanes to enable a lane-based
// shuffle.
if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
return Result;
// Otherwise fall back on generic lowering.
return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
}
/// High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumElts = VT.getVectorNumElements();
int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
if (NumV2Elements == 1 && Mask[0] >= NumElts)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Handle special cases where the lower or upper half is UNDEF.
if (SDValue V =
lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
return V;
// There is a really nice hard cut-over between AVX1 and AVX2 that means we
// can check for those subtargets here and avoid much of the subtarget
// querying in the per-vector-type lowering routines. With AVX1 we have
// essentially *zero* ability to manipulate a 256-bit vector with integer
// types. Since we'll use floating point types there eventually, just
// immediately cast everything to a float and operate entirely in that domain.
if (VT.isInteger() && !Subtarget.hasAVX2()) {
int ElementBits = VT.getScalarSizeInBits();
if (ElementBits < 32) {
// No floating-point type is available; if we can't use the bit operations
// for masking/blending, then decompose into 128-bit vectors.
if (SDValue V =
lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG))
return V;
if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
return V;
return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}
MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
VT.getVectorNumElements());
V1 = DAG.getBitcast(FpVT, V1);
V2 = DAG.getBitcast(FpVT, V2);
return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
}
switch (VT.SimpleTy) {
case MVT::v4f64:
return lowerV4F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v4i64:
return lowerV4I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8f32:
return lowerV8F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8i32:
return lowerV8I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16i16:
return lowerV16I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v32i8:
return lowerV32I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 256-bit x86 vector type!");
}
}
/// Try to lower a vector shuffle as 128-bit shuffles.
static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(VT.getScalarSizeInBits() == 64 &&
"Unexpected element type size for 128bit shuffle.");
// Handling a 256-bit vector would require VLX, and lowerV2X128VectorShuffle()
// is most probably the better solution for that case.
assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
// TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
SmallVector<int, 4> WidenedMask;
if (!canWidenShuffleElements(Mask, WidenedMask))
return SDValue();
// Try to use an insert into a zero vector.
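// Zeroable has one bit per 64-bit element here: (Zeroable & 0xf0) == 0xf0
// means elements 4-7 (the upper 256 bits) are zeroable, and (Zeroable & 0x0c)
// == 0x0c additionally zeroes elements 2-3, leaving only V1's low 128 bits
// live.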
if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
(WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
getZeroVector(VT, Subtarget, DAG, DL), LoV,
DAG.getIntPtrConstant(0, DL));
}
// Check for patterns which can be matched with a single insert of a 256-bit
// subvector.
bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
{0, 1, 2, 3, 0, 1, 2, 3});
if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
{0, 1, 2, 3, 8, 9, 10, 11})) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
OnlyUsesV1 ? V1 : V2,
DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
DAG.getIntPtrConstant(4, DL));
}
assert(WidenedMask.size() == 4);
// See if this is an insertion of the lower 128-bits of V2 into V1.
bool IsInsert = true;
int V2Index = -1;
for (int i = 0; i < 4; ++i) {
assert(WidenedMask[i] >= -1);
if (WidenedMask[i] < 0)
continue;
// Make sure all V1 subvectors are in place.
if (WidenedMask[i] < 4) {
if (WidenedMask[i] != i) {
IsInsert = false;
break;
}
} else {
// Make sure we only have a single V2 index and it's the lowest 128 bits.
if (V2Index >= 0 || WidenedMask[i] != 4) {
IsInsert = false;
break;
}
V2Index = i;
}
}
if (IsInsert && V2Index >= 0) {
MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
DAG.getIntPtrConstant(0, DL));
return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
}
// Try to lower to vshuf64x2/vshuf32x4.
SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
unsigned PermMask = 0;
// Ensure the elements come from the same Op.
for (int i = 0; i < 4; ++i) {
assert(WidenedMask[i] >= -1);
if (WidenedMask[i] < 0)
continue;
SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
unsigned OpIndex = i / 2;
if (Ops[OpIndex].isUndef())
Ops[OpIndex] = Op;
else if (Ops[OpIndex] != Op)
return SDValue();
// Convert the 128-bit shuffle mask selection values into 128-bit selection
// bits defined by a vshuf64x2 instruction's immediate control byte.
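// For example, WidenedMask = {0, 1, 6, 7} takes the two low 128-bit lanes
// from V1 and the two high lanes from V2, producing
// PermMask = 0 | (1 << 2) | (2 << 4) | (3 << 6) = 0xE4.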
PermMask |= (WidenedMask[i] % 4) << (i * 2);
}
return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
DAG.getConstant(PermMask, DL, MVT::i8));
}
/// Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (V2.isUndef()) {
// Use low duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
// Non-half-crossing single input shuffles can be lowered with an
// interleaved permutation.
unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
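// For example, the odd-element duplicate mask {1,1,3,3,5,5,7,7} sets all
// eight bits (immediate 0xFF), while the identity mask yields 0xAA.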
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
DAG.getConstant(VPERMILPMask, DL, MVT::i8));
}
SmallVector<int, 4> RepeatedMask;
if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
}
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, Zeroable, V1, V2,
Subtarget, DAG))
return Shuf128;
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Unpck;
// Check if the blend happens to exactly fit that of SHUFPD.
if (SDValue Op =
lowerVectorShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG))
return Op;
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1,
V2, DAG, Subtarget))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
}
/// Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// If the shuffle mask is repeated in each 128-bit lane, we have many more
// options to efficiently lower the shuffle.
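// For example, Mask = {0,0,2,2, 4,4,6,6, 8,8,10,10, 12,12,14,14} repeats
// {0,0,2,2} in every 128-bit lane and lowers to the single MOVSLDUP below.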
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
// Use even/odd duplicate instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
if (V2.isUndef())
return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
return Unpck;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// Otherwise, fall back to a SHUFPS sequence.
return lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
}
// If we have a single-input shuffle with different shuffle patterns in the
// 128-bit lanes that doesn't cross lanes, use a variable-mask VPERMILPS.
if (V2.isUndef() &&
!is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
}
// If we have AVX512F support, we can use VEXPAND.
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
}
/// Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
if (V2.isUndef()) {
// When the shuffle is mirrored across the 128-bit lanes, we can use
// lower-latency instructions that will operate on all four 128-bit lanes.
SmallVector<int, 2> Repeated128Mask;
if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
SmallVector<int, 4> PSHUFDMask;
scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
return DAG.getBitcast(
MVT::v8i64,
DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
DAG.getBitcast(MVT::v16i32, V1),
getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
}
SmallVector<int, 4> Repeated256Mask;
if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
}
if (SDValue Shuf128 =
lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, Zeroable,
V1, V2, Subtarget, DAG))
return Shuf128;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use VALIGN.
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
// Try to use PALIGNR.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i64, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
if (SDValue Unpck =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
return Unpck;
// If we have AVX512F support, we can use VEXPAND.
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1,
V2, DAG, Subtarget))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
}
/// Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// If the shuffle mask is repeated in each 128-bit lane we can use more
// efficient instructions that mirror the shuffles across the four 128-bit
// lanes.
SmallVector<int, 4> RepeatedMask;
bool Is128BitLaneRepeatedShuffle =
is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
if (Is128BitLaneRepeatedShuffle) {
assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
if (V2.isUndef())
return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
return V;
}
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use VALIGN.
if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v16i32, V1, V2,
Mask, Subtarget, DAG))
return Rotate;
// Try to use byte rotation instructions.
if (Subtarget.hasBWI())
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
return Rotate;
// Assume that a single SHUFPS is faster than using a permv shuffle.
// If some CPU is harmed by the domain switch, we can fix it in a later pass.
if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
CastV1, CastV2, DAG);
return DAG.getBitcast(MVT::v16i32, ShufPS);
}
// If we have AVX512F support, we can use VEXPAND.
if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask,
V1, V2, DAG, Subtarget))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
}
/// Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG))
return Rotate;
if (V2.isUndef()) {
SmallVector<int, 8> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
// As this is a single-input shuffle, the repeated mask should be
// a strictly valid v8i16 mask that we can pass through to the v8i16
// lowering to handle even the v32 case.
return lowerV8I16GeneralSingleInputVectorShuffle(
DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
}
}
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v32i16, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
}
/// Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
const APInt &Zeroable,
SDValue V1, SDValue V2,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
// Whenever we can lower this as a zext, that instruction is strictly faster
// than any alternative. It also allows us to fold memory operands into the
// shuffle in many cases.
if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
return ZExt;
// Use dedicated unpack instructions for masks that match their pattern.
if (SDValue V =
lowerVectorShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
return V;
// Try to use shift instructions.
if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Shift;
// Try to use byte rotation instructions.
if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
return Rotate;
if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB(
DL, MVT::v64i8, Mask, V1, V2, Zeroable, Subtarget, DAG))
return PSHUFB;
// VBMI can use VPERMV/VPERMV3 byte shuffles.
if (Subtarget.hasVBMI())
return lowerVectorShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
// Try to create an in-lane repeating shuffle mask and then shuffle the
// results into the target lanes.
if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
return V;
if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
Zeroable, Subtarget, DAG))
return Blend;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}
/// High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/ basic ISA!");
// If we have a single input to the zero element, insert that into V1 if we
// can do so cheaply.
int NumElts = Mask.size();
int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
if (NumV2Elements == 1 && Mask[0] >= NumElts)
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
return Insertion;
// Handle special cases where the lower or upper half is UNDEF.
if (SDValue V =
lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
return V;
// Check for being able to broadcast a single element.
if (SDValue Broadcast =
lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
return Broadcast;
// Dispatch to each element type for lowering. If we don't have support for
// specific element type shuffles at 512 bits, immediately split them and
// lower them. Each lowering routine of a given type is allowed to assume that
// the requisite ISA extensions for that element type are available.
switch (VT.SimpleTy) {
case MVT::v8f64:
return lowerV8F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16f32:
return lowerV16F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v8i64:
return lowerV8I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v16i32:
return lowerV16I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v32i16:
return lowerV32I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
case MVT::v64i8:
return lowerV64I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
default:
llvm_unreachable("Not a valid 512-bit x86 vector type!");
}
}
// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles mask registers.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle it, and then truncate it back.
static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
MVT VT, SDValue V1, SDValue V2,
const APInt &Zeroable,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
unsigned NumElts = Mask.size();
// Try to recognize shuffles that are just padding a subvector with zeros.
unsigned SubvecElts = 0;
for (int i = 0; i != (int)NumElts; ++i) {
if (Mask[i] >= 0 && Mask[i] != i)
break;
++SubvecElts;
}
assert(SubvecElts != NumElts && "Identity shuffle?");
// Clip to a power of 2.
SubvecElts = PowerOf2Floor(SubvecElts);
// Make sure the number of zeroable elements at the top at least covers the
// elements not covered by the subvector.
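// For example, with NumElts == 8, Mask = {0, 1, Z, Z, Z, Z, Z, Z} (Z =
// zeroable) clips SubvecElts to 2, so we extract a v2i1 subvector and insert
// it into a zero vector.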
if (Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
V1, DAG.getIntPtrConstant(0, DL));
return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
getZeroVector(VT, Subtarget, DAG, DL),
Extract, DAG.getIntPtrConstant(0, DL));
}
assert(Subtarget.hasAVX512() &&
"Cannot lower 512-bit vectors w/o basic ISA!");
MVT ExtVT;
switch (VT.SimpleTy) {
default:
llvm_unreachable("Expected a vector of i1 elements");
case MVT::v2i1:
ExtVT = MVT::v2i64;
break;
case MVT::v4i1:
ExtVT = MVT::v4i32;
break;
case MVT::v8i1:
// Take a 512-bit type, as more shuffles are available on KNL. If we have
// VLX, use a 256-bit shuffle.
ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
break;
case MVT::v16i1:
// Take 512-bit type, unless we are avoiding 512-bit types and have the
// 256-bit operation available.
ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
break;
case MVT::v32i1:
// Take 512-bit type, unless we are avoiding 512-bit types and have the
// 256-bit operation available.
assert(Subtarget.hasBWI() && "Expected AVX512BW support");
ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
break;
case MVT::v64i1:
ExtVT = MVT::v64i8;
break;
}
V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
// Since i1 was sign-extended, we can use X86ISD::CVT2MASK.
int NumElems = VT.getVectorNumElements();
if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
(Subtarget.hasDQI() && (NumElems < 32)))
return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
Shuffle, ISD::SETGT);
return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
}
/// Helper function that returns true if the shuffle mask should be
/// commuted to improve canonicalization.
static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
int NumElements = Mask.size();
int NumV1Elements = 0, NumV2Elements = 0;
for (int M : Mask)
if (M < 0)
continue;
else if (M < NumElements)
++NumV1Elements;
else
++NumV2Elements;
// Commute the shuffle as needed such that more elements come from V1 than
// V2. This allows us to match the shuffle pattern strictly on how many
// elements come from V1 without handling the symmetric cases.
if (NumV2Elements > NumV1Elements)
return true;
assert(NumV1Elements > 0 && "No V1 indices");
if (NumV2Elements == 0)
return false;
// When the number of V1 and V2 elements are the same, try to minimize the
// number of uses of V2 in the low half of the vector. When that is tied,
// ensure that the sum of indices for V1 is equal to or lower than the sum
// of indices for V2. When those are equal, try to ensure that the number of odd
// indices for V1 is lower than the number of odd indices for V2.
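// For example, on v4i32 the mask {4, 5, 0, 1} draws both low-half elements
// from V2, so commuting it to the equivalent mask {0, 1, 4, 5} is preferred.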
if (NumV1Elements == NumV2Elements) {
int LowV1Elements = 0, LowV2Elements = 0;
for (int M : Mask.slice(0, NumElements / 2))
if (M >= NumElements)
++LowV2Elements;
else if (M >= 0)
++LowV1Elements;
if (LowV2Elements > LowV1Elements)
return true;
if (LowV2Elements == LowV1Elements) {
int SumV1Indices = 0, SumV2Indices = 0;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (Mask[i] >= NumElements)
SumV2Indices += i;
else if (Mask[i] >= 0)
SumV1Indices += i;
if (SumV2Indices < SumV1Indices)
return true;
if (SumV2Indices == SumV1Indices) {
int NumV1OddIndices = 0, NumV2OddIndices = 0;
for (int i = 0, Size = Mask.size(); i < Size; ++i)
if (Mask[i] >= NumElements)
NumV2OddIndices += i % 2;
else if (Mask[i] >= 0)
NumV1OddIndices += i % 2;
if (NumV2OddIndices < NumV1OddIndices)
return true;
}
}
}
return false;
}
/// Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
ArrayRef<int> Mask = SVOp->getMask();
SDValue V1 = Op.getOperand(0);
SDValue V2 = Op.getOperand(1);
MVT VT = Op.getSimpleValueType();
int NumElements = VT.getVectorNumElements();
SDLoc DL(Op);
bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
"Can't lower MMX shuffles");
bool V1IsUndef = V1.isUndef();
bool V2IsUndef = V2.isUndef();
if (V1IsUndef && V2IsUndef)
return DAG.getUNDEF(VT);
// When we create a shuffle node we put the UNDEF node as the second operand,
// but in some cases the first operand may be transformed to UNDEF.
// In this case we should just commute the node.
if (V1IsUndef)
return DAG.getCommutedVectorShuffle(*SVOp);
// Check for non-undef masks pointing at an undef vector and make the masks
// undef as well. This makes it easier to match the shuffle based solely on
// the mask.
if (V2IsUndef)
for (int M : Mask)
if (M >= NumElements) {
SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
for (int &M : NewMask)
if (M >= NumElements)
M = -1;
return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
}
// Check for illegal shuffle mask element index values.
int MaskUpperLimit = Mask.size() * (V2IsUndef ? 1 : 2); (void)MaskUpperLimit;
assert(llvm::all_of(Mask,
[&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
"Out of bounds shuffle index");
// We actually see shuffles that are entirely re-arrangements of a set of
// zero inputs. This mostly happens while decomposing complex shuffles into
// simple ones. Directly lower these as a buildvector of zeros.
APInt Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
if (Zeroable.isAllOnesValue())
return getZeroVector(VT, Subtarget, DAG, DL);
bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
// Create an alternative mask with info about zeroable elements.
// Here we do not set undef elements as zeroable.
SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
if (V2IsZero) {
assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
for (int i = 0; i != NumElements; ++i)
if (Mask[i] != SM_SentinelUndef && Zeroable[i])
ZeroableMask[i] = SM_SentinelZero;
}
// Try to collapse shuffles into using a vector type with fewer elements but
// wider element types. We cap this to not form integers or floating point
// elements wider than 64 bits, but it might be interesting to form i128
// integers to handle flipping the low and high halves of AVX 256-bit vectors.
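// For example, a v4i32 mask {0, 1, 4, 5} widens to the v2i64 mask {0, 2}:
// each pair of consecutive narrow elements becomes one wide element.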
SmallVector<int, 16> WidenedMask;
if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
canWidenShuffleElements(ZeroableMask, WidenedMask)) {
MVT NewEltVT = VT.isFloatingPoint()
? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
: MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
int NewNumElts = NumElements / 2;
MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
// Make sure that the new vector type is legal. For example, v2f64 isn't
// legal on SSE1.
if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
if (V2IsZero) {
// Modify the new Mask to take all zeros from the all-zero vector.
// Choose indices that are blend-friendly.
bool UsedZeroVector = false;
assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
"V2's non-undef elements are used?!");
for (int i = 0; i != NewNumElts; ++i)
if (WidenedMask[i] == SM_SentinelZero) {
WidenedMask[i] = i + NewNumElts;
UsedZeroVector = true;
}
// Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
// some elements to be undef.
if (UsedZeroVector)
V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
}
V1 = DAG.getBitcast(NewVT, V1);
V2 = DAG.getBitcast(NewVT, V2);
return DAG.getBitcast(
VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
}
}
// Commute the shuffle if it will improve canonicalization.
if (canonicalizeShuffleMaskWithCommute(Mask))
return DAG.getCommutedVectorShuffle(*SVOp);
if (SDValue V =
lowerVectorShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
return V;
// For each vector width, delegate to a specialized lowering routine.
if (VT.is128BitVector())
return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
if (VT.is256BitVector())
return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
if (VT.is512BitVector())
return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
if (Is1BitVector)
return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
DAG);
llvm_unreachable("Unimplemented!");
}
/// Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Cond = Op.getOperand(0);
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
auto *CondBV = cast<BuildVectorSDNode>(Cond);
// Only non-legal VSELECTs reach this lowering, convert those into generic
// shuffles and re-use the shuffle lowering path for blends.
SmallVector<int, 32> Mask;
for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
SDValue CondElt = CondBV->getOperand(i);
int M = i;
// We can't map undef to undef here. They have different meanings. Treat
// as the same as zero.
if (CondElt.isUndef() || isNullConstant(CondElt))
M += Size;
Mask.push_back(M);
}
return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
}
SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
// A vselect where all conditions and data are constants can be optimized into
// a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
return SDValue();
// Try to lower this to a blend-style vector shuffle. This can handle all
// constant condition cases.
if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
return BlendOp;
// If this VSELECT has a vector of i1 as a mask, it will be directly matched
// with patterns on the mask registers on AVX-512.
if (Op->getOperand(0).getValueType().getScalarSizeInBits() == 1)
return Op;
// Variable blends are only legal from SSE4.1 onward.
if (!Subtarget.hasSSE41())
return SDValue();
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
// If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
// into an i1 condition so that we can use the mask-based 512-bit blend
// instructions.
if (VT.getSizeInBits() == 512) {
SDValue Cond = Op.getOperand(0);
// The vNi1 condition case should be handled above as it can be trivially
// lowered.
assert(Cond.getValueType().getScalarSizeInBits() ==
VT.getScalarSizeInBits() &&
"Should have a size-matched integer condition!");
// Build a mask by testing the condition against zero.
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
getZeroVector(VT, Subtarget, DAG, dl),
ISD::SETNE);
// Now return a new VSELECT using the mask.
return DAG.getSelect(dl, VT, Mask, Op.getOperand(1), Op.getOperand(2));
}
// Only some types will be legal on some subtargets. If we can emit a legal
// VSELECT-matching blend, return Op, but if we need to expand, return
// a null value.
switch (VT.SimpleTy) {
default:
// Most of the vector types have blends past SSE4.1.
return Op;
case MVT::v32i8:
// The byte blends for AVX vectors were introduced only in AVX2.
if (Subtarget.hasAVX2())
return Op;
return SDValue();
case MVT::v8i16:
case MVT::v16i16: {
// Bitcast everything to the vXi8 type and use a vXi8 vselect.
MVT CastVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
SDValue Cond = DAG.getBitcast(CastVT, Op->getOperand(0));
SDValue LHS = DAG.getBitcast(CastVT, Op->getOperand(1));
SDValue RHS = DAG.getBitcast(CastVT, Op->getOperand(2));
SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
return DAG.getBitcast(VT, Select);
}
}
}
static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
return SDValue();
if (VT.getSizeInBits() == 8) {
SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
Op.getOperand(0), Op.getOperand(1));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
}
if (VT == MVT::f32) {
// EXTRACTPS outputs to a GPR32 register which will require a movd to copy
// the result back to FR32 register. It's only worth matching if the
// result has a single use which is a store or a bitcast to i32. And in
// the case of a store, it's not worth it if the index is a constant 0,
// because a MOVSSmr can be used instead, which is smaller and faster.
if (!Op.hasOneUse())
return SDValue();
SDNode *User = *Op.getNode()->use_begin();
if ((User->getOpcode() != ISD::STORE ||
isNullConstant(Op.getOperand(1))) &&
(User->getOpcode() != ISD::BITCAST ||
User->getValueType(0) != MVT::i32))
return SDValue();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
Op.getOperand(1));
return DAG.getBitcast(MVT::f32, Extract);
}
if (VT == MVT::i32 || VT == MVT::i64) {
// ExtractPS/pextrq work with a constant index.
if (isa<ConstantSDNode>(Op.getOperand(1)))
return Op;
}
return SDValue();
}
/// Extract one bit from mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Vec = Op.getOperand(0);
SDLoc dl(Vec);
MVT VecVT = Vec.getSimpleValueType();
SDValue Idx = Op.getOperand(1);
MVT EltVT = Op.getSimpleValueType();
assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
"Unexpected vector type in ExtractBitFromMaskVector");
// A variable index can't be handled in mask registers;
// extend the vector to VR512/VR128.
if (!isa<ConstantSDNode>(Idx)) {
unsigned NumElts = VecVT.getVectorNumElements();
// Extending v8i1/v16i1 to 512-bit gets better performance on KNL
// than extending to 128/256-bit.
MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
}
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
// If the kshift instructions of the correct width aren't natively supported
// then we need to promote the vector to the native size to get the correct
// zeroing behavior.
if (VecVT.getVectorNumElements() < 16) {
VecVT = MVT::v16i1;
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
DAG.getUNDEF(VecVT), Vec,
DAG.getIntPtrConstant(0, dl));
}
// Extracts from element 0 are always allowed.
if (IdxVal != 0) {
// Use kshiftr instruction to move to the lower element.
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
DAG.getConstant(IdxVal, dl, MVT::i8));
}
// Shrink to v16i1 since that's always legal.
if (VecVT.getVectorNumElements() > 16) {
VecVT = MVT::v16i1;
Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VecVT, Vec,
DAG.getIntPtrConstant(0, dl));
}
// Convert to a bitcast+aext/trunc.
MVT CastVT = MVT::getIntegerVT(VecVT.getVectorNumElements());
return DAG.getAnyExtOrTrunc(DAG.getBitcast(CastVT, Vec), dl, EltVT);
}
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
MVT VecVT = Vec.getSimpleValueType();
SDValue Idx = Op.getOperand(1);
if (VecVT.getVectorElementType() == MVT::i1)
return ExtractBitFromMaskVector(Op, DAG, Subtarget);
if (!isa<ConstantSDNode>(Idx)) {
// It's more profitable to go through memory (1 cycle throughput)
// than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
// The IACA tool was used to get the performance estimate
// (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
//
// example : extractelement <16 x i8> %a, i32 %i
//
// Block Throughput: 3.00 Cycles
// Throughput Bottleneck: Port5
//
// | Num Of | Ports pressure in cycles | |
// | Uops | 0 - DV | 5 | 6 | 7 | |
// ---------------------------------------------
// | 1 | | 1.0 | | | CP | vmovd xmm1, edi
// | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
// | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
// Total Num Of Uops: 4
//
//
// Block Throughput: 1.00 Cycles
// Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
//
// | | Ports pressure in cycles | |
// |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
// ---------------------------------------------------------
// |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
// |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
// |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
// Total Num Of Uops: 4
return SDValue();
}
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
// If this is a 256-bit vector result, first extract the 128-bit vector and
// then extract the element from the 128-bit vector.
if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
// Get the 128-bit vector.
Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
MVT EltVT = VecVT.getVectorElementType();
unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
// Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
// this can be done with a mask.
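// For example, extracting element 5 of a v8f32 reads 128-bit chunk 1, and
// 5 & (4 - 1) == 1 selects element 1 within that chunk.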
IdxVal &= ElemsPerChunk - 1;
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
DAG.getConstant(IdxVal, dl, MVT::i32));
}
assert(VecVT.is128BitVector() && "Unexpected vector length");
MVT VT = Op.getSimpleValueType();
if (VT.getSizeInBits() == 16) {
// If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
// we're going to zero extend the register or fold the store (SSE41 only).
if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
!(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec), Idx));
// Transform it so it matches pextrw, which produces a 32-bit result.
SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
Op.getOperand(0), Op.getOperand(1));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
}
if (Subtarget.hasSSE41())
if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
return Res;
// TODO: We only extract a single element from v16i8, we can probably afford
// to be more aggressive here before using the default approach of spilling to
// stack.
if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
// Extract either the lowest i32 or any i16, and extract the sub-byte.
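// For example, extracting byte 5 reads 16-bit element 2 (WordIdx = 5 / 2)
// and shifts right by (5 % 2) * 8 == 8 bits before truncating back to i8.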
int DWordIdx = IdxVal / 4;
if (DWordIdx == 0) {
SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getBitcast(MVT::v4i32, Vec),
DAG.getIntPtrConstant(DWordIdx, dl));
int ShiftVal = (IdxVal % 4) * 8;
if (ShiftVal != 0)
Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
DAG.getConstant(ShiftVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
int WordIdx = IdxVal / 2;
SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
DAG.getBitcast(MVT::v8i16, Vec),
DAG.getIntPtrConstant(WordIdx, dl));
int ShiftVal = (IdxVal % 2) * 8;
if (ShiftVal != 0)
Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
DAG.getConstant(ShiftVal, dl, MVT::i8));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
if (VT.getSizeInBits() == 32) {
if (IdxVal == 0)
return Op;
// SHUFPS the element to the lowest double word, then movss.
int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
if (VT.getSizeInBits() == 64) {
// FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
// FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
// to match extract_elt for f64.
if (IdxVal == 0)
return Op;
// UNPCKHPD the element to the lowest double word, then movsd.
// Note that if the lower 64 bits of the result of the UNPCKHPD are then
// stored to an f64mem, the whole operation is folded into a single MOVHPDmr.
int Mask[2] = { 1, -1 };
Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
DAG.getIntPtrConstant(0, dl));
}
return SDValue();
}
/// Insert one bit to mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
SDValue Elt = Op.getOperand(1);
SDValue Idx = Op.getOperand(2);
MVT VecVT = Vec.getSimpleValueType();
if (!isa<ConstantSDNode>(Idx)) {
// Non-constant index: extend source and destination,
// insert the element, and then truncate the result.
unsigned NumElts = VecVT.getVectorNumElements();
MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
}
// Copy into a k-register, extract to v1i1 and insert_subvector.
SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
Op.getOperand(2));
}
SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElts = VT.getVectorNumElements();
if (EltVT == MVT::i1)
return InsertBitToMaskVector(Op, DAG, Subtarget);
SDLoc dl(Op);
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
SDValue N2 = Op.getOperand(2);
if (!isa<ConstantSDNode>(N2))
return SDValue();
auto *N2C = cast<ConstantSDNode>(N2);
unsigned IdxVal = N2C->getZExtValue();
bool IsZeroElt = X86::isZeroNode(N1);
bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
// If we are inserting an element, see if we can do this more efficiently with
// a blend shuffle with a rematerializable vector than a costly integer
// insertion.
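// For example, zeroing element 2 of a v4i32 builds BlendMask = {0, 1, 6, 3},
// taking element 2 from the rematerialized constant vector.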
if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
16 <= EltVT.getSizeInBits()) {
SmallVector<int, 8> BlendMask;
for (unsigned i = 0; i != NumElts; ++i)
BlendMask.push_back(i == IdxVal ? i + NumElts : i);
SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
: getOnesVector(VT, DAG, dl);
return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
}
// If the vector is wider than 128 bits, extract the 128-bit subvector, insert
// into that, and then insert the subvector back into the result.
if (VT.is256BitVector() || VT.is512BitVector()) {
// With a 256-bit vector, we can insert into the zero element efficiently
// using a blend if we have AVX or AVX2 and the right data type.
if (VT.is256BitVector() && IdxVal == 0) {
// TODO: It is worthwhile to cast integer to floating point and back
// and incur a domain crossing penalty if that's what we'll end up
// doing anyway after extracting to a 128-bit vector.
if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
(Subtarget.hasAVX2() && EltVT == MVT::i32)) {
SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
N2 = DAG.getIntPtrConstant(1, dl);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
}
}
// Get the desired 128-bit vector chunk.
SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
// Insert the element into the desired chunk.
unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
assert(isPowerOf2_32(NumEltsIn128));
// Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
DAG.getConstant(IdxIn128, dl, MVT::i32));
// Insert the changed part back into the bigger vector
return insert128BitVector(N0, V, IdxVal, DAG, dl);
}
assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
// Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
// argument. SSE41 required for pinsrb.
if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
unsigned Opc;
if (VT == MVT::v8i16) {
assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
Opc = X86ISD::PINSRW;
} else {
assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
Opc = X86ISD::PINSRB;
}
if (N1.getValueType() != MVT::i32)
N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
if (N2.getValueType() != MVT::i32)
N2 = DAG.getIntPtrConstant(IdxVal, dl);
return DAG.getNode(Opc, dl, VT, N0, N1, N2);
}
if (Subtarget.hasSSE41()) {
if (EltVT == MVT::f32) {
// Bits [7:6] of the constant are the source select. This will always be
// zero here. The DAG Combiner may combine an extract_elt index into
// these bits. For example (insert (extract, 3), 2) could be matched by
// putting the '3' into bits [7:6] of X86ISD::INSERTPS.
// Bits [5:4] of the constant are the destination select. This is the
// value of the incoming immediate.
// Bits [3:0] of the constant are the zero mask. The DAG Combiner may
// combine either bitwise AND or insert of float 0.0 to set these bits.
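// For example, an insertion into element 2 produces the immediate
// 2 << 4 = 0x20: destination select bits [5:4] == 2, with the source select
// and zero mask left as zero.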
bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
// If this is an insertion of 32-bits into the low 32-bits of
// a vector, we prefer to generate a blend with immediate rather
// than an insertps. Blends are simpler operations in hardware and so
// will always have equal or better performance than insertps.
// But if optimizing for size and there's a load folding opportunity,
// generate insertps because blendps does not have a 32-bit memory
// operand form.
N2 = DAG.getIntPtrConstant(1, dl);
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2);
}
N2 = DAG.getIntPtrConstant(IdxVal << 4, dl);
// Create this as a scalar-to-vector.
N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
}
// PINSR* works with a constant index.
if (EltVT == MVT::i32 || EltVT == MVT::i64)
return Op;
}
return SDValue();
}
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT OpVT = Op.getSimpleValueType();
// It's always cheaper to replace a xor+movd with xorps, and doing so
// simplifies further combines.
if (X86::isZeroNode(Op.getOperand(0)))
return getZeroVector(OpVT, Subtarget, DAG, dl);
// If this is a 256-bit vector result, first insert into a 128-bit
// vector and then insert into the 256-bit vector.
if (!OpVT.is128BitVector()) {
// Insert into a 128-bit vector.
unsigned SizeFactor = OpVT.getSizeInBits() / 128;
MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
OpVT.getVectorNumElements() / SizeFactor);
Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
// Insert the 128-bit vector.
return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
}
assert(OpVT.is128BitVector() && "Expected an SSE type!");
// Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
if (OpVT == MVT::v4i32)
return Op;
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
return DAG.getBitcast(
OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
}
// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
return insert1BitVector(Op, DAG, Subtarget);
}
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
"Only vXi1 extract_subvectors need custom lowering");
SDLoc dl(Op);
SDValue Vec = Op.getOperand(0);
SDValue Idx = Op.getOperand(1);
if (!isa<ConstantSDNode>(Idx))
return SDValue();
unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
if (IdxVal == 0) // the operation is legal
return Op;
MVT VecVT = Vec.getSimpleValueType();
unsigned NumElems = VecVT.getVectorNumElements();
// Extend to natively supported kshift.
MVT WideVecVT = VecVT;
if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
DAG.getUNDEF(WideVecVT), Vec,
DAG.getIntPtrConstant(0, dl));
}
// Shift to the LSB.
Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
DAG.getConstant(IdxVal, dl, MVT::i8));
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
DAG.getIntPtrConstant(0, dl));
}
// Returns the appropriate wrapper opcode for a global reference.
unsigned X86TargetLowering::getGlobalWrapperKind(
const GlobalValue *GV, const unsigned char OpFlags) const {
// References to absolute symbols are never PC-relative.
if (GV && GV->isAbsoluteSymbolRef())
return X86ISD::Wrapper;
CodeModel::Model M = getTargetMachine().getCodeModel();
if (Subtarget.isPICStyleRIPRel() &&
(M == CodeModel::Small || M == CodeModel::Kernel))
return X86ISD::WrapperRIP;
// GOTPCREL references must always use RIP.
if (OpFlags == X86II::MO_GOTPCREL)
return X86ISD::WrapperRIP;
return X86ISD::Wrapper;
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above-mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetConstantPool(
CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
SDLoc DL(CP);
Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
}
return Result;
}
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
SDLoc DL(JT);
Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag)
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
return Result;
}
SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag);
SDLoc DL(Op);
Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (isPositionIndependent() && !Subtarget.is64Bit()) {
Result =
DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
}
// For symbols that require a load from a stub to get the address, emit the
// load.
if (isGlobalStubReference(OpFlag))
Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
return Result;
}
SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
// Create the TargetBlockAddress node.
unsigned char OpFlags =
Subtarget.classifyBlockAddressReference();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
SDLoc dl(Op);
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (isGlobalRelativeToPICBase(OpFlags)) {
Result = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
}
return Result;
}
SDValue X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV,
const SDLoc &dl, int64_t Offset,
SelectionDAG &DAG) const {
// Create the TargetGlobalAddress node, folding in the constant
// offset if it is legal.
unsigned char OpFlags = Subtarget.classifyGlobalReference(GV);
CodeModel::Model M = DAG.getTarget().getCodeModel();
auto PtrVT = getPointerTy(DAG.getDataLayout());
SDValue Result;
if (OpFlags == X86II::MO_NO_FLAG &&
X86::isOffsetSuitableForCodeModel(Offset, M)) {
// A direct static reference to a global.
Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset);
Offset = 0;
} else {
Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags);
}
Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
// With PIC, the address is actually $g + Offset.
if (isGlobalRelativeToPICBase(OpFlags)) {
Result = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
}
// For globals that require a load from a stub to get the address, emit the
// load.
if (isGlobalStubReference(OpFlags))
Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
// If there was a non-zero offset that we didn't fold, create an explicit
// addition for it.
if (Offset != 0)
Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
DAG.getConstant(Offset, dl, PtrVT));
return Result;
}
SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
}
static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
unsigned char OperandFlags, bool LocalDynamic = false) {
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDLoc dl(GA);
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(),
OperandFlags);
X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
: X86ISD::TLSADDR;
if (InFlag) {
SDValue Ops[] = { Chain, TGA, *InFlag };
Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
} else {
SDValue Ops[] = { Chain, TGA };
Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
}
// TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
MFI.setAdjustsStack(true);
MFI.setHasCalls(true);
SDValue Flag = Chain.getValue(1);
return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT) {
SDValue InFlag;
SDLoc dl(GA); // ? function entry point might be better
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg,
SDLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT) {
return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
X86::RAX, X86II::MO_TLSGD);
}
static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
SelectionDAG &DAG,
const EVT PtrVT,
bool is64Bit) {
SDLoc dl(GA);
// Get the start address of the TLS block for this module.
X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
.getInfo<X86MachineFunctionInfo>();
MFI->incNumLocalDynamicTLSAccesses();
SDValue Base;
if (is64Bit) {
Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
X86II::MO_TLSLD, /*LocalDynamic=*/true);
} else {
SDValue InFlag;
SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
InFlag = Chain.getValue(1);
Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
X86II::MO_TLSLDM, /*LocalDynamic=*/true);
}
// Note: the CleanupLocalDynamicTLSPass will remove redundant computations
// of Base.
// Build x@dtpoff.
unsigned char OperandFlags = X86II::MO_DTPOFF;
unsigned WrapperKind = X86ISD::Wrapper;
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
// Add x@dtpoff with the base.
return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}
// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
const EVT PtrVT, TLSModel::Model model,
bool is64Bit, bool isPIC) {
SDLoc dl(GA);
// Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
is64Bit ? 257 : 256));
SDValue ThreadPointer =
DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
MachinePointerInfo(Ptr));
unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
// the initial-exec model.
unsigned WrapperKind = X86ISD::Wrapper;
if (model == TLSModel::LocalExec) {
OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
} else if (model == TLSModel::InitialExec) {
if (is64Bit) {
OperandFlags = X86II::MO_GOTTPOFF;
WrapperKind = X86ISD::WrapperRIP;
} else {
OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
}
} else {
llvm_unreachable("Unexpected model");
}
// emit "addl x@ntpoff,%eax" (local exec)
// or "addl x@indntpoff,%eax" (initial exec)
// or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
SDValue TGA =
DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
if (model == TLSModel::InitialExec) {
if (isPIC && !is64Bit) {
Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
Offset);
}
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
MachinePointerInfo::getGOT(DAG.getMachineFunction()));
}
// The address of the thread local variable is the add of the thread
// pointer with the offset of the variable.
return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
if (DAG.getTarget().useEmulatedTLS())
return LowerToTLSEmulatedModel(GA, DAG);
const GlobalValue *GV = GA->getGlobal();
auto PtrVT = getPointerTy(DAG.getDataLayout());
bool PositionIndependent = isPositionIndependent();
if (Subtarget.isTargetELF()) {
TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
switch (model) {
case TLSModel::GeneralDynamic:
if (Subtarget.is64Bit())
return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
case TLSModel::LocalDynamic:
return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
Subtarget.is64Bit());
case TLSModel::InitialExec:
case TLSModel::LocalExec:
return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
PositionIndependent);
}
llvm_unreachable("Unknown TLS model.");
}
if (Subtarget.isTargetDarwin()) {
// Darwin only has one model of TLS. Lower to that.
unsigned char OpFlag = 0;
unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
X86ISD::WrapperRIP : X86ISD::Wrapper;
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
if (PIC32)
OpFlag = X86II::MO_TLVP_PIC_BASE;
else
OpFlag = X86II::MO_TLVP;
SDLoc DL(Op);
SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
GA->getValueType(0),
GA->getOffset(), OpFlag);
SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
// With PIC32, the address is actually $g + Offset.
if (PIC32)
Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
Offset);
// Lowering the machine ISD node will make sure everything is in the right
// location.
SDValue Chain = DAG.getEntryNode();
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
SDValue Args[] = { Chain, Offset };
Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
DAG.getIntPtrConstant(0, DL, true),
Chain.getValue(1), DL);
// TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
MFI.setAdjustsStack(true);
// And our return value (tls address) is in the standard call return value
// location.
unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
}
if (Subtarget.isTargetKnownWindowsMSVC() ||
Subtarget.isTargetWindowsItanium() ||
Subtarget.isTargetWindowsGNU()) {
// Just use the implicit TLS architecture
// Need to generate something similar to:
// mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
// ; from TEB
// mov ecx, dword [rel _tls_index]; Load index (from C runtime)
// mov rcx, qword [rdx+rcx*8]
// mov eax, .tls$:tlsvar
// [rax+rcx] contains the address
// Windows 64bit: gs:0x58
// Windows 32bit: fs:__tls_array
SDLoc dl(GA);
SDValue Chain = DAG.getEntryNode();
// Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
// %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
// use its literal value of 0x2C.
Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
? Type::getInt8PtrTy(*DAG.getContext(),
256)
: Type::getInt32PtrTy(*DAG.getContext(),
257));
SDValue TlsArray = Subtarget.is64Bit()
? DAG.getIntPtrConstant(0x58, dl)
: (Subtarget.isTargetWindowsGNU()
? DAG.getIntPtrConstant(0x2C, dl)
: DAG.getExternalSymbol("_tls_array", PtrVT));
SDValue ThreadPointer =
DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
SDValue res;
if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
res = ThreadPointer;
} else {
// Load the _tls_index variable
SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
if (Subtarget.is64Bit())
IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
MachinePointerInfo(), MVT::i32);
else
IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
auto &DL = DAG.getDataLayout();
SDValue Scale =
DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
}
res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
// Get the offset of the start of the .tls section
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
GA->getValueType(0),
GA->getOffset(), X86II::MO_SECREL);
SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
// The address of the thread local variable is the add of the thread
// pointer with the offset of the variable.
return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
}
llvm_unreachable("TLS not implemented for this target.");
}
/// Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
MVT VT = Op.getSimpleValueType();
unsigned VTBits = VT.getSizeInBits();
SDLoc dl(Op);
bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
// X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
// generic ISD nodes do not. Insert an AND to be safe; it's optimized away
// during isel.
SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
DAG.getConstant(VTBits - 1, dl, MVT::i8));
SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
DAG.getConstant(VTBits - 1, dl, MVT::i8))
: DAG.getConstant(0, dl, VT);
SDValue Tmp2, Tmp3;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
} else {
Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
}
// If the shift amount is greater than or equal to the width of a part we
// can't rely on the results of shld/shrd. Insert a test and select the
// appropriate values for large shift amounts.
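// Illustrative example (not from the original source): for a 64-bit SHL
// lowered as two i32 parts with ShAmt = 40, SafeShAmt = 40 & 31 = 8 and
// (40 & 32) != 0, so the selects below pick the "large shift" values:
// Hi = Lo << 8 and Lo = 0, which matches (Hi:Lo) << 40.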
SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
DAG.getConstant(VTBits, dl, MVT::i8));
SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
SDValue Hi, Lo;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
} else {
Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
}
return DAG.getMergeValues({ Lo, Hi }, dl);
}
// Try to use a packed vector operation to handle i64 on 32-bit targets when
// AVX512DQ is enabled.
static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert((Op.getOpcode() == ISD::SINT_TO_FP ||
Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!");
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
MVT VT = Op.getSimpleValueType();
if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
(VT != MVT::f32 && VT != MVT::f64))
return SDValue();
// Pack the i64 into a vector, do the operation and extract.
// Use a 256-bit wide source so the result is 128 bits in the f32 case.
unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
MVT VecVT = MVT::getVectorVT(VT, NumElts);
SDLoc dl(Op);
SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
DAG.getIntPtrConstant(0, dl));
}
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
if (SrcVT.isVector()) {
if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
DAG.getUNDEF(SrcVT)));
}
return SDValue();
}
assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
"Unknown SINT_TO_FP to lower!");
// These are really Legal; return the operand so the caller accepts it as
// Legal.
if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT))
return Op;
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit()) {
return Op;
}
if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
return V;
SDValue ValueToStore = Op.getOperand(0);
if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) &&
!Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
unsigned Size = SrcVT.getSizeInBits()/8;
MachineFunction &MF = DAG.getMachineFunction();
auto PtrVT = getPointerTy(MF.getDataLayout());
int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
SDValue Chain = DAG.getStore(
DAG.getEntryNode(), dl, ValueToStore, StackSlot,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
SDValue StackSlot,
SelectionDAG &DAG) const {
// Build the FILD
SDLoc DL(Op);
SDVTList Tys;
bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
if (useSSE)
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
else
Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
unsigned ByteSize = SrcVT.getSizeInBits()/8;
FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
MachineMemOperand *MMO;
if (FI) {
int SSFI = FI->getIndex();
MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
MachineMemOperand::MOLoad, ByteSize, ByteSize);
} else {
MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
StackSlot = StackSlot.getOperand(1);
}
SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
X86ISD::FILD, DL,
Tys, Ops, SrcVT, MMO);
if (useSSE) {
Chain = Result.getValue(1);
SDValue InFlag = Result.getValue(2);
// FIXME: Currently the FST is flagged to the FILD_FLAG. This
// shouldn't be necessary except that RFP cannot be live across
// multiple blocks. When stackifier is fixed, they can be uncoupled.
MachineFunction &MF = DAG.getMachineFunction();
unsigned SSFISize = Op.getValueSizeInBits()/8;
int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
auto PtrVT = getPointerTy(MF.getDataLayout());
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
Tys = DAG.getVTList(MVT::Other);
SDValue Ops[] = {
Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
};
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
MachineMemOperand::MOStore, SSFISize, SSFISize);
Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
Ops, Op.getValueType(), MMO);
Result = DAG.getLoad(
Op.getValueType(), DL, Chain, StackSlot,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
}
return Result;
}
/// 64-bit unsigned integer to double expansion.
static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// This algorithm is not obvious. Here is what we're trying to output:
/*
movq %rax, %xmm0
punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
#ifdef __SSE3__
haddpd %xmm0, %xmm0
#else
pshufd $0x4e, %xmm0, %xmm1
addpd %xmm1, %xmm0
#endif
*/
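// Why this works (illustrative note, not from the original source): with the
// input split as x = Hi*2^32 + Lo, the punpckldq builds the two doubles
// 0x43300000:Lo = 2^52 + Lo and 0x45300000:Hi = 2^84 + Hi*2^32 (the low
// 32 bits land in the mantissa). Subtracting c1 = { 2^52, 2^84 } is exact
// and leaves { Lo, Hi*2^32 }; the final horizontal add then produces
// Lo + Hi*2^32 = x with a single rounding.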
SDLoc dl(Op);
LLVMContext *Context = DAG.getContext();
// Build some magic constants.
static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
Constant *C0 = ConstantDataVector::get(*Context, CV0);
auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
SmallVector<Constant*,2> CV1;
CV1.push_back(
ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
APInt(64, 0x4330000000000000ULL))));
CV1.push_back(
ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
APInt(64, 0x4530000000000000ULL))));
Constant *C1 = ConstantVector::get(CV1);
SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
// Load the 64-bit value into an XMM register.
SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
Op.getOperand(0));
SDValue CLod0 =
DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
/* Alignment = */ 16);
SDValue Unpck1 =
getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
SDValue CLod1 =
DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
/* Alignment = */ 16);
SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
SDValue Result;
if (Subtarget.hasSSE3()) {
// FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
} else {
SDValue S2F = DAG.getBitcast(MVT::v4i32, Sub);
SDValue Shuffle = DAG.getVectorShuffle(MVT::v4i32, dl, S2F, S2F, {2,3,0,1});
Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
DAG.getBitcast(MVT::v2f64, Shuffle), Sub);
}
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
DAG.getIntPtrConstant(0, dl));
}
/// 32-bit unsigned integer to float expansion.
static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
// FP constant to bias correct the final result.
SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
MVT::f64);
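// Illustrative arithmetic (not from the original source): OR-ing a 32-bit
// value u into the low mantissa bits of 0x4330000000000000 (= 2^52) yields
// the double 2^52 + u exactly; subtracting the bias then recovers u. E.g.
// u = 7 gives bits 0x4330000000000007 = 2^52 + 7, and (2^52 + 7) - 2^52 = 7.0.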
// Load the 32-bit value into an XMM register.
SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
Op.getOperand(0));
// Zero out the upper parts of the register.
Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getBitcast(MVT::v2f64, Load),
DAG.getIntPtrConstant(0, dl));
// Or the load with the bias.
SDValue Or = DAG.getNode(
ISD::OR, dl, MVT::v2i64,
DAG.getBitcast(MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
DAG.getBitcast(MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
Or =
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
// Subtract the bias.
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
// Handle final rounding.
return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
}
static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
if (Op.getSimpleValueType() != MVT::v2f64)
return SDValue();
SDValue N0 = Op.getOperand(0);
assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
// Legalize to v4i32 type.
N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
DAG.getUNDEF(MVT::v2i32));
if (Subtarget.hasAVX512())
return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
// Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
// but using v2i32 to v2f64 with X86ISD::CVTSI2P.
SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);
// Two to the power of half-word-size.
SDValue TWOHW = DAG.getConstantFP(1 << 16, DL, MVT::v2f64);
// Clear upper part of LO, lower HI.
SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);
SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);
// Add the two halves.
return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
}
static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// The algorithm is the following:
// #ifdef __SSE4_1__
// uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
// uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
// (uint4) 0x53000000, 0xaa);
// #else
// uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
// uint4 hi = (v >> 16) | (uint4) 0x53000000;
// #endif
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
// return (float4) lo + fhi;
// We shouldn't use it when unsafe-fp-math is enabled though: we might later
// reassociate the two FADDs, and if we do that, the algorithm fails
// spectacularly (PR24512).
// FIXME: If we ever have some kind of Machine FMF, this should be marked
// as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
// there's also the MachineCombiner reassociations happening on Machine IR.
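// Worked example of the arithmetic (illustrative, not from the original
// source): lo reinterprets as the float 2^23 + (v & 0xffff) and hi as
// 2^39 + (v >> 16)*2^16, since the OR'd bits fall into the mantissa.
// fhi = hi - (2^39 + 2^23) = (v >> 16)*2^16 - 2^23, so
// lo + fhi = (v & 0xffff) + (v >> 16)*2^16 = v, as desired.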
if (DAG.getTarget().Options.UnsafeFPMath)
return SDValue();
SDLoc DL(Op);
SDValue V = Op->getOperand(0);
MVT VecIntVT = V.getSimpleValueType();
bool Is128 = VecIntVT == MVT::v4i32;
MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
// If we convert to something else than the supported type, e.g., to v4f64,
// abort early.
if (VecFloatVT != Op->getSimpleValueType(0))
return SDValue();
assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
"Unsupported custom type");
// In the #ifdef/#else code, we have in common:
// - The vector of constants:
// -- 0x4b000000
// -- 0x53000000
// - A shift:
// -- v >> 16
// Create the splat vector for 0x4b000000.
SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
// Create the splat vector for 0x53000000.
SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
// Create the right shift.
SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
SDValue Low, High;
if (Subtarget.hasSSE41()) {
MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
// uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
// Low will be bitcasted right away, so do not bother bitcasting back to its
// original type.
Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
// uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
// (uint4) 0x53000000, 0xaa);
SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
// High will be bitcasted right away, so do not bother bitcasting back to
// its original type.
High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32));
} else {
SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
// uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
// uint4 hi = (v >> 16) | (uint4) 0x53000000;
High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
}
// Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
SDValue VecCstFAdd = DAG.getConstantFP(
APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
// float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
// TODO: Are there any fast-math-flags to propagate here?
SDValue FHigh =
DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
// return (float4) lo + fhi;
SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}
static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue N0 = Op.getOperand(0);
MVT SrcVT = N0.getSimpleValueType();
SDLoc dl(Op);
switch (SrcVT.SimpleTy) {
default:
llvm_unreachable("Custom UINT_TO_FP is not supported!");
case MVT::v2i32:
return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
case MVT::v4i32:
case MVT::v8i32:
assert(!Subtarget.hasAVX512());
return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
}
}
SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
SelectionDAG &DAG) const {
SDValue N0 = Op.getOperand(0);
SDLoc dl(Op);
auto PtrVT = getPointerTy(DAG.getDataLayout());
if (Op.getSimpleValueType().isVector())
return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
MVT SrcVT = N0.getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
(SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
// Conversions from unsigned i32 to f32/f64 are legal,
// using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
return Op;
}
if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
return V;
if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
if (SrcVT == MVT::i32 && X86ScalarSSEf64)
return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
return SDValue();
// Make a 64-bit buffer, and use it to build an FILD.
SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
if (SrcVT == MVT::i32) {
SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
StackSlot, MachinePointerInfo());
SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
OffsetSlot, MachinePointerInfo());
SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
return Fild;
}
assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
SDValue ValueToStore = Op.getOperand(0);
if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
// Bitcasting to f64 here allows us to do a single 64-bit store from
// an SSE register, avoiding the store forwarding penalty that would come
// with two 32-bit stores.
ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
MachinePointerInfo());
// For i64 source, we need to add the appropriate power of 2 if the input
// was negative. This is the same as the optimization in
// DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
// we must be careful to do the computation in x87 extended precision, not
// in SSE. (The generic code can't know it's OK to do this, or how to.)
int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
MachineMemOperand::MOLoad, 8, 8);
SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
MVT::i64, MMO);
APInt FF(32, 0x5F800000ULL);
// Check whether the sign bit is set.
SDValue SignSet = DAG.getSetCC(
dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
// Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
SDValue FudgePtr = DAG.getConstantPool(
ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
// Get a pointer to FF if the sign bit was set, or to 0 otherwise.
SDValue Zero = DAG.getIntPtrConstant(0, dl);
SDValue Four = DAG.getIntPtrConstant(4, dl);
SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
// Load the value out, extending it from f32 to f80.
// FIXME: Avoid the extend by constructing the right constant pool?
SDValue Fudge = DAG.getExtLoad(
ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
/* Alignment = */ 4);
// Extend everything to 80 bits to force it to be done on x87.
// TODO: Are there any fast-math-flags to propagate here?
SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
DAG.getIntPtrConstant(0, dl));
}
// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
// just return an <SDValue(), SDValue()> pair.
// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
// to i16, i32 or i64, and we lower it to a legal sequence.
// If lowered to the final integer result we return a <result, SDValue()> pair.
// Otherwise we lower it to a sequence ending with a FIST, return a
// <FIST, StackSlot> pair, and the caller is responsible for loading
// the final integer result from StackSlot.
std::pair<SDValue,SDValue>
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
bool IsSigned, bool IsReplace) const {
SDLoc DL(Op);
EVT DstTy = Op.getValueType();
EVT TheVT = Op.getOperand(0).getValueType();
auto PtrVT = getPointerTy(DAG.getDataLayout());
if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
// f16 must be promoted before using the lowering in this routine.
// fp128 does not use this lowering.
return std::make_pair(SDValue(), SDValue());
}
// If using FIST to compute an unsigned i64, we'll need some fixup
// to handle values above the maximum signed i64. A FIST is always
// used for the 32-bit subtarget, but also for f80 on a 64-bit target.
bool UnsignedFixup = !IsSigned &&
DstTy == MVT::i64 &&
(!Subtarget.is64Bit() ||
!isScalarFPTypeInSSEReg(TheVT));
if (!IsSigned && DstTy != MVT::i64 && !Subtarget.hasAVX512()) {
// Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
// The low 32 bits of the fist result will have the correct uint32 result.
assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
DstTy = MVT::i64;
}
assert(DstTy.getSimpleVT() <= MVT::i64 &&
DstTy.getSimpleVT() >= MVT::i16 &&
"Unknown FP_TO_INT to lower!");
// These are really Legal.
if (DstTy == MVT::i32 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
if (Subtarget.is64Bit() &&
DstTy == MVT::i64 &&
isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
return std::make_pair(SDValue(), SDValue());
// We lower FP->int64 into FISTP64 followed by a load from a temporary
// stack slot.
MachineFunction &MF = DAG.getMachineFunction();
unsigned MemSize = DstTy.getSizeInBits()/8;
int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
unsigned Opc;
switch (DstTy.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
}
SDValue Chain = DAG.getEntryNode();
SDValue Value = Op.getOperand(0);
SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
if (UnsignedFixup) {
//
// Conversion to unsigned i64 is implemented with a select,
// depending on whether the source value fits in the range
// of a signed i64. Let Thresh be the FP equivalent of
// 0x8000000000000000ULL.
//
// Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
// FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
// Fist-to-mem64 FistSrc
// Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
// to XOR'ing the high 32 bits with Adjust.
//
// Being a power of 2, Thresh is exactly representable in all FP formats.
// For X87 we'd like to use the smallest FP type for this constant, but
// for DAG type consistency we have to match the FP operand type.
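// Illustrative example (not from the original source): Thresh is 2^63
// (0x5f000000 as a float). For Value >= 2^63 the FIST input becomes
// Value - 2^63 and Adjust = 0x80000000; XOR-ing the high half of the
// 64-bit result with Adjust adds 2^63 back. E.g. Value = 2^63 converts
// Sub = 0.0 to 0, and the XOR produces 0x8000000000000000 = 2^63.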
APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
bool LosesInfo = false;
if (TheVT == MVT::f64)
// The rounding mode is irrelevant as the conversion should be exact.
Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
&LosesInfo);
else if (TheVT == MVT::f80)
Status = Thresh.convert(APFloat::x87DoubleExtended(),
APFloat::rmNearestTiesToEven, &LosesInfo);
assert(Status == APFloat::opOK && !LosesInfo &&
"FP conversion should have been exact");
SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
SDValue Cmp = DAG.getSetCC(DL,
getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), TheVT),
Value, ThreshVal, ISD::SETLT);
Adjust = DAG.getSelect(DL, MVT::i32, Cmp,
DAG.getConstant(0, DL, MVT::i32),
DAG.getConstant(0x80000000, DL, MVT::i32));
SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), TheVT),
Value, ThreshVal, ISD::SETLT);
Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
}
// FIXME: This causes a redundant load/store if the SSE-class value is already
// in memory, such as if it is on the callstack.
if (isScalarFPTypeInSSEReg(TheVT)) {
assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, DL, Value, StackSlot,
MachinePointerInfo::getFixedStack(MF, SSFI));
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
SDValue Ops[] = {
Chain, StackSlot, DAG.getValueType(TheVT)
};
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
MachineMemOperand::MOLoad, MemSize, MemSize);
Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
Chain = Value.getValue(1);
SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
}
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
MachineMemOperand::MOStore, MemSize, MemSize);
if (UnsignedFixup) {
// Insert the FIST, load its result as two i32's,
// and XOR the high i32 with Adjust.
SDValue FistOps[] = { Chain, Value, StackSlot };
SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
FistOps, DstTy, MMO);
SDValue Low32 =
DAG.getLoad(MVT::i32, DL, FIST, StackSlot, MachinePointerInfo());
SDValue HighAddr = DAG.getMemBasePlusOffset(StackSlot, 4, DL);
SDValue High32 =
DAG.getLoad(MVT::i32, DL, FIST, HighAddr, MachinePointerInfo());
High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);
if (Subtarget.is64Bit()) {
// Join High32 and Low32 into a 64-bit result.
// (High32 << 32) | Low32
Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
High32 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, High32);
High32 = DAG.getNode(ISD::SHL, DL, MVT::i64, High32,
DAG.getConstant(32, DL, MVT::i8));
SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i64, High32, Low32);
return std::make_pair(Result, SDValue());
}
SDValue ResultOps[] = { Low32, High32 };
SDValue pair = IsReplace
? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResultOps)
: DAG.getMergeValues(ResultOps, DL);
return std::make_pair(pair, SDValue());
} else {
// Build the FP_TO_INT*_IN_MEM
SDValue Ops[] = { Chain, Value, StackSlot };
SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
Ops, DstTy, MMO);
return std::make_pair(FIST, StackSlot);
}
}
static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
SDLoc dl(Op);
assert(VT.isVector() && InVT.isVector() && "Expected vector type");
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
"Expected same number of elements");
assert((VT.getVectorElementType() == MVT::i16 ||
VT.getVectorElementType() == MVT::i32 ||
VT.getVectorElementType() == MVT::i64) &&
"Unexpected element type");
assert((InVT.getVectorElementType() == MVT::i8 ||
InVT.getVectorElementType() == MVT::i16 ||
InVT.getVectorElementType() == MVT::i32) &&
"Unexpected element type");
if (Subtarget.hasInt256())
return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
// Optimize vectors in AVX mode:
//
// v8i16 -> v8i32
// Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
// Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
// Concat upper and lower parts.
//
// v4i32 -> v4i64
// Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
// Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
// Concat upper and lower parts.
//
SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
SDValue Undef = DAG.getUNDEF(InVT);
bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements()/2);
OpLo = DAG.getBitcast(HVT, OpLo);
OpHi = DAG.getBitcast(HVT, OpHi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
const SDLoc &dl, SelectionDAG &DAG) {
assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
DAG.getIntPtrConstant(0, dl));
SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
DAG.getIntPtrConstant(8, dl));
Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}
static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
SDLoc DL(Op);
unsigned NumElts = VT.getVectorNumElements();
// For all vectors but vXi8 we can just emit a sign_extend and a shift. This
// avoids a constant pool load.
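// Illustrative example (not from the original source): for v8i1 -> v8i16,
// SIGN_EXTEND turns each mask bit into 0x0000 or 0xFFFF, and the logical
// shift right by 15 leaves 0 or 1 in each lane.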
if (VT.getVectorElementType() != MVT::i8) {
SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
return DAG.getNode(ISD::SRL, DL, VT, Extend,
DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
}
// Extend VT if BWI is not supported.
MVT ExtVT = VT;
if (!Subtarget.hasBWI()) {
// If v16i32 is to be avoided, we'll need to split and concatenate.
if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
}
// Widen to 512-bits if VLX is not supported.
MVT WideVT = ExtVT;
if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
NumElts *= 512 / ExtVT.getSizeInBits();
InVT = MVT::getVectorVT(MVT::i1, NumElts);
In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
In, DAG.getIntPtrConstant(0, DL));
WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
NumElts);
}
SDValue One = DAG.getConstant(1, DL, WideVT);
SDValue Zero = getZeroVector(WideVT, Subtarget, DAG, DL);
SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
// Truncate if we had to extend above.
if (VT != ExtVT) {
WideVT = MVT::getVectorVT(MVT::i8, NumElts);
SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
}
// Extract back to 128/256-bit if we widened.
if (WideVT != VT)
SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
DAG.getIntPtrConstant(0, DL));
return SelectedVal;
}
static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op.getOperand(0);
MVT SVT = In.getSimpleValueType();
if (SVT.getVectorElementType() == MVT::i1)
return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
assert(Subtarget.hasAVX() && "Expected AVX support");
return LowerAVXExtend(Op, DAG, Subtarget);
}
/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
/// It makes use of the fact that vectors with enough leading sign/zero bits
/// prevent the PACKSS/PACKUS from saturating the results.
/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
/// within each 128-bit lane.
static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
const SDLoc &DL, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
"Unexpected PACK opcode");
// Requires SSE2 but AVX512 has fast vector truncate.
if (!Subtarget.hasSSE2() || Subtarget.hasAVX512() || !DstVT.isVector())
return SDValue();
EVT SrcVT = In.getValueType();
// No truncation required, we might get here due to recursive calls.
if (SrcVT == DstVT)
return In;
// We only support vector truncation to 64 bits or greater from a
// source of 128 bits or greater.
unsigned DstSizeInBits = DstVT.getSizeInBits();
unsigned SrcSizeInBits = SrcVT.getSizeInBits();
if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
return SDValue();
unsigned NumElems = SrcVT.getVectorNumElements();
if (!isPowerOf2_32(NumElems))
return SDValue();
LLVMContext &Ctx = *DAG.getContext();
assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
// Pack to the largest type possible:
// vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
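// For instance (illustrative, not from the original source), a v8i32 ->
// v8i8 truncation first packs i32 -> i16 here and then recurses at the
// bottom of this function to pack i16 -> i8.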
EVT InVT = MVT::i16, OutVT = MVT::i8;
if (SrcVT.getScalarSizeInBits() > 16 &&
(Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
InVT = MVT::i32;
OutVT = MVT::i16;
}
// 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
if (SrcVT.is128BitVector()) {
InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
In = DAG.getBitcast(InVT, In);
SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
Res = extractSubVector(Res, 0, DAG, DL, 64);
return DAG.getBitcast(DstVT, Res);
}
// Extract lower/upper subvectors.
unsigned NumSubElts = NumElems / 2;
SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
unsigned SubSizeInBits = SrcSizeInBits / 2;
InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
// 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
Lo = DAG.getBitcast(InVT, Lo);
Hi = DAG.getBitcast(InVT, Hi);
SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
return DAG.getBitcast(DstVT, Res);
}
// AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
// AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
Lo = DAG.getBitcast(InVT, Lo);
Hi = DAG.getBitcast(InVT, Hi);
SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
// 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
// so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
Res = DAG.getBitcast(MVT::v4i64, Res);
Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, {0, 2, 1, 3});
if (DstVT.is256BitVector())
return DAG.getBitcast(DstVT, Res);
// If 512bit -> 128bit truncate another stage.
EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
Res = DAG.getBitcast(PackedVT, Res);
return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}
// Recursively pack lower/upper subvectors, concat result and pack again.
assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}
static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
// Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
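// Illustrative note (not from the original source): after the shift, each
// lane is negative exactly when its original lsb was set, so the
// setcc(0, In, SETGT) below reads the mask straight from the sign bits.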
unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
if (InVT.getScalarSizeInBits() <= 16) {
if (Subtarget.hasBWI()) {
// legal, will go to VPMOVB2M, VPMOVW2M
if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
// We need to shift to get the lsb into sign position.
// Shifting packed bytes is not supported natively, so bitcast to words.
MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
In = DAG.getNode(ISD::SHL, DL, ExtVT,
DAG.getBitcast(ExtVT, In),
DAG.getConstant(ShiftInx, DL, ExtVT));
In = DAG.getBitcast(InVT, In);
}
return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
In, ISD::SETGT);
}
// Use TESTD/Q, extended vector to packed dword/qword.
assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
"Unexpected vector type.");
unsigned NumElts = InVT.getVectorNumElements();
assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
// We need to change to a wider element type that we have support for.
// For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
// For 16 element vectors we extend to v16i32 unless we are explicitly
// trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
// we need to split into two 8 element vectors which we can extend to v8i32,
// truncate and concat the results. There's an additional complication if
// the original type is v16i8. In that case we can't split the v16i8 so
// first we pre-extend it to v16i16 which we can split to v8i16, then extend
// to v8i32, truncate that to v8i1 and concat the two halves.
if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
if (InVT == MVT::v16i8) {
// First we need to sign extend up to 256-bits so we can split that.
InVT = MVT::v16i16;
In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
}
SDValue Lo = extract128BitVector(In, 0, DAG, DL);
SDValue Hi = extract128BitVector(In, 8, DAG, DL);
// We're split now, just emit two truncates and a concat. The two
// truncates will trigger legalization to come back to this function.
Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
// We either have 8 elements or we're allowed to use 512-bit vectors.
// If we have VLX, we want to use the narrowest vector that can get the
// job done so we use vXi32.
MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
InVT = ExtVT;
ShiftInx = InVT.getScalarSizeInBits() - 1;
}
if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
// We need to shift to get the lsb into sign position.
In = DAG.getNode(ISD::SHL, DL, InVT, In,
DAG.getConstant(ShiftInx, DL, InVT));
}
// If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
if (Subtarget.hasDQI())
return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
In, ISD::SETGT);
return DAG.getSetCC(DL, VT, In, getZeroVector(InVT, Subtarget, DAG, DL),
ISD::SETNE);
}
SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT InVT = In.getSimpleValueType();
unsigned InNumEltBits = InVT.getScalarSizeInBits();
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
"Invalid TRUNCATE operation");
if (VT.getVectorElementType() == MVT::i1)
return LowerTruncateVecI1(Op, DAG, Subtarget);
// vpmovqb/w/d, vpmovdb/w, vpmovwb
if (Subtarget.hasAVX512()) {
// word to byte only under BWI
if (InVT == MVT::v16i16 && !Subtarget.hasBWI()) { // v16i16 -> v16i8
// Make sure we're allowed to promote to 512 bits.
if (Subtarget.canExtendTo512DQ())
return DAG.getNode(ISD::TRUNCATE, DL, VT,
DAG.getNode(X86ISD::VSEXT, DL, MVT::v16i32, In));
} else {
return Op;
}
}
unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
// Truncate with PACKUS if we are truncating a vector with leading zero bits
// that extend all the way to the packed/truncated value.
// Pre-SSE41 we can only use PACKUSWB.
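// For example (illustrative, not from the original source), a v8i32 ->
// v8i16 truncation can use PACKUSDW when each element has at least
// 32 - 16 = 16 known leading zeros, because the unsigned saturation then
// never clamps the value.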
KnownBits Known;
DAG.computeKnownBits(In, Known);
if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
if (SDValue V =
truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
return V;
// Truncate with PACKSS if we are truncating a vector with sign-bits that
// extend all the way to the packed/truncated value.
if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
if (SDValue V =
truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
return V;
if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
// On AVX2, v4i64 -> v4i32 becomes VPERMD.
if (Subtarget.hasInt256()) {
static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
In = DAG.getBitcast(MVT::v8i32, In);
In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
DAG.getIntPtrConstant(0, DL));
}
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(0, DL));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(2, DL));
OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
static const int ShufMask[] = {0, 2, 4, 6};
return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
}
if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
// On AVX2, v8i32 -> v8i16 becomes PSHUFB.
if (Subtarget.hasInt256()) {
In = DAG.getBitcast(MVT::v32i8, In);
// The PSHUFB mask:
static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
-1, -1, -1, -1, -1, -1, -1, -1,
16, 17, 20, 21, 24, 25, 28, 29,
-1, -1, -1, -1, -1, -1, -1, -1 };
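// This mask keeps the low two bytes of each dword (e.g. bytes 0,1 of dword
// 0), so after the shuffle the truncated words sit in the low 64 bits of
// each 128-bit lane; the v4i64 shuffle below then gathers those two qwords.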
In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
In = DAG.getBitcast(MVT::v4i64, In);
static const int ShufMask2[] = {0, 2, -1, -1};
In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
DAG.getIntPtrConstant(0, DL));
return DAG.getBitcast(VT, In);
}
SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
DAG.getIntPtrConstant(0, DL));
SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
DAG.getIntPtrConstant(4, DL));
OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
// The PSHUFB mask:
static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
-1, -1, -1, -1, -1, -1, -1, -1};
OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
// The MOVLHPS Mask:
static const int ShufMask2[] = {0, 1, 4, 5};
SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
return DAG.getBitcast(MVT::v8i16, res);
}
// Handle truncation of V256 to V128 using shuffles.
assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
assert(Subtarget.hasAVX() && "256-bit vector without AVX!");
unsigned NumElems = VT.getVectorNumElements();
MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
SmallVector<int, 16> MaskVec(NumElems * 2, -1);
// Prepare truncation shuffle mask
for (unsigned i = 0; i != NumElems; ++i)
MaskVec[i] = i * 2;
In = DAG.getBitcast(NVT, In);
SDValue V = DAG.getVectorShuffle(NVT, DL, In, In, MaskVec);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
DAG.getIntPtrConstant(0, DL));
}
SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
MVT VT = Op.getSimpleValueType();
if (VT.isVector()) {
SDValue Src = Op.getOperand(0);
SDLoc dl(Op);
if (VT == MVT::v2i1 && Src.getSimpleValueType() == MVT::v2f64) {
MVT ResVT = MVT::v4i32;
MVT TruncVT = MVT::v4i1;
unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
if (!IsSigned && !Subtarget.hasVLX()) {
// Widen to 512-bits.
ResVT = MVT::v8i32;
TruncVT = MVT::v8i1;
Opc = ISD::FP_TO_UINT;
Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
DAG.getUNDEF(MVT::v8f64),
Src, DAG.getIntPtrConstant(0, dl));
}
SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
DAG.getIntPtrConstant(0, dl));
}
assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
if (VT == MVT::v2i64 && Src.getSimpleValueType() == MVT::v2f32) {
return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
DAG.getUNDEF(MVT::v2f32)));
}
return SDValue();
}
assert(!VT.isVector());
std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
IsSigned, /*IsReplace=*/ false);
SDValue FIST = Vals.first, StackSlot = Vals.second;
// If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
if (!FIST.getNode())
return Op;
if (StackSlot.getNode())
// Load the result.
return DAG.getLoad(VT, SDLoc(Op), FIST, StackSlot, MachinePointerInfo());
// The node is the result.
return FIST;
}
static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
SDLoc DL(Op);
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
MVT SVT = In.getSimpleValueType();
assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
return DAG.getNode(X86ISD::VFPEXT, DL, VT,
DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
In, DAG.getUNDEF(SVT)));
}
/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
"Wrong opcode for lowering FABS or FNEG.");
bool IsFABS = (Op.getOpcode() == ISD::FABS);
// If this is a FABS and it has an FNEG user, bail out to fold the combination
// into an FNABS. We'll lower the FABS after that if it is still in use.
if (IsFABS)
for (SDNode *User : Op->uses())
if (User->getOpcode() == ISD::FNEG)
return Op;
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
bool IsF128 = (VT == MVT::f128);
// FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
// decide if we should generate a 16-byte constant mask when we only need 4 or
// 8 bytes for the scalar case.
MVT LogicVT;
MVT EltVT;
if (VT.isVector()) {
LogicVT = VT;
EltVT = VT.getVectorElementType();
} else if (IsF128) {
// SSE instructions are used for optimized f128 logical operations.
LogicVT = MVT::f128;
EltVT = VT;
} else {
// There are no scalar bitwise logical SSE/AVX instructions, so we
// generate a 16-byte vector constant and logic op even for the scalar case.
// Using a 16-byte mask allows folding the load of the mask with
// the logic op, which can save ~4 bytes of code size.
LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
EltVT = VT;
}
unsigned EltBits = EltVT.getSizeInBits();
// For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
APInt MaskElt =
IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignMask(EltBits);
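// E.g. for f32, the FABS mask is 0x7FFFFFFF (clears the sign bit) and the
// FNEG mask is 0x80000000 (flips the sign bit).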
const fltSemantics &Sem =
EltVT == MVT::f64 ? APFloat::IEEEdouble() :
(IsF128 ? APFloat::IEEEquad() : APFloat::IEEEsingle());
SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
SDValue Op0 = Op.getOperand(0);
bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
unsigned LogicOp =
IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
if (VT.isVector() || IsF128)
return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
// For the scalar case extend to a 128-bit vector, perform the logic op,
// and extract the scalar result back out.
Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
DAG.getIntPtrConstant(0, dl));
}
static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue Mag = Op.getOperand(0);
SDValue Sign = Op.getOperand(1);
SDLoc dl(Op);
// If the sign operand is smaller, extend it first.
MVT VT = Op.getSimpleValueType();
if (Sign.getSimpleValueType().bitsLT(VT))
Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
// And if it is bigger, shrink it first.
if (Sign.getSimpleValueType().bitsGT(VT))
Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
// At this point the operands and the result should have the same
// type, and that won't be f80 since that is not custom lowered.
bool IsF128 = (VT == MVT::f128);
assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
"Unexpected type in LowerFCOPYSIGN");
MVT EltVT = VT.getScalarType();
const fltSemantics &Sem =
EltVT == MVT::f64 ? APFloat::IEEEdouble()
: (IsF128 ? APFloat::IEEEquad() : APFloat::IEEEsingle());
// Perform all scalar logic operations as 16-byte vectors because there are no
// scalar FP logic instructions in SSE.
// TODO: This isn't necessary. If we used scalar types, we might avoid some
// unnecessary splats, but we might miss load folding opportunities. Should
// this decision be based on OptimizeForSize?
bool IsFakeVector = !VT.isVector() && !IsF128;
MVT LogicVT = VT;
if (IsFakeVector)
LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
// The mask constants are automatically splatted for vector types.
unsigned EltSizeInBits = VT.getScalarSizeInBits();
SDValue SignMask = DAG.getConstantFP(
APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
SDValue MagMask = DAG.getConstantFP(
APFloat(Sem, ~APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
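// E.g. for f32 this computes (Mag & 0x7FFFFFFF) | (Sign & 0x80000000),
// i.e. the magnitude bits of Mag combined with the sign bit of Sign.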
// First, clear all bits but the sign bit from the second operand (sign).
if (IsFakeVector)
Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
// Next, clear the sign bit from the first operand (magnitude).
// TODO: If we had general constant folding for FP logic ops, this check
// wouldn't be necessary.
SDValue MagBits;
if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Mag)) {
APFloat APF = Op0CN->getValueAPF();
APF.clearSign();
MagBits = DAG.getConstantFP(APF, dl, LogicVT);
} else {
// If the magnitude operand wasn't a constant, we need to AND out the sign.
if (IsFakeVector)
Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
}
// OR the magnitude value with the sign bit.
SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
DAG.getIntPtrConstant(0, dl));
}
static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
SDValue N0 = Op.getOperand(0);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT OpVT = N0.getSimpleValueType();
assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
"Unexpected type for FGETSIGN");
// Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
Res = DAG.getZExtOrTrunc(Res, dl, VT);
Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
return Res;
}
/// Helper for creating a X86ISD::SETCC node.
static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
SelectionDAG &DAG) {
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(Cond, dl, MVT::i8), EFLAGS);
}
// Check whether an OR'd tree is PTEST-able.
static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
if (!Subtarget.hasSSE41())
return SDValue();
if (!Op->hasOneUse())
return SDValue();
SDNode *N = Op.getNode();
SDLoc DL(N);
SmallVector<SDValue, 8> Opnds;
DenseMap<SDValue, unsigned> VecInMap;
SmallVector<SDValue, 8> VecIns;
EVT VT = MVT::Other;
// Recognize the special case where a vector is cast into a wide integer to
// test for all zeros.
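// E.g. a wide-integer zero-test of a bitcast v2i64 typically reaches us here
// as (or (extractelt v, 0), (extractelt v, 1)) == 0, which can be lowered to
// a single PTEST of v against itself.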
Opnds.push_back(N->getOperand(0));
Opnds.push_back(N->getOperand(1));
for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
// BFS traverse all OR'd operands.
if (I->getOpcode() == ISD::OR) {
Opnds.push_back(I->getOperand(0));
Opnds.push_back(I->getOperand(1));
// Re-evaluate the number of nodes to be traversed.
e += 2; // 2 more nodes (LHS and RHS) are pushed.
continue;
}
// Quit if the operand is not an EXTRACT_VECTOR_ELT.
if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
// Quit if the index is not a constant.
SDValue Idx = I->getOperand(1);
if (!isa<ConstantSDNode>(Idx))
return SDValue();
SDValue ExtractedFromVec = I->getOperand(0);
DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
if (M == VecInMap.end()) {
VT = ExtractedFromVec.getValueType();
// Quit if not 128/256-bit vector.
if (!VT.is128BitVector() && !VT.is256BitVector())
return SDValue();
// Quit if not the same type.
if (VecInMap.begin() != VecInMap.end() &&
VT != VecInMap.begin()->first.getValueType())
return SDValue();
M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
VecIns.push_back(ExtractedFromVec);
}
M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
}
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Not extracted from 128-/256-bit vector.");
unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
for (DenseMap<SDValue, unsigned>::const_iterator
I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
// Quit if not all elements are used.
if (I->second != FullMask)
return SDValue();
}
MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
// Cast all vectors into TestVT for PTEST.
for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
// If more than one full vector is evaluated, OR them first before PTEST.
for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
// Each iteration will OR 2 nodes and append the result until there is only
// 1 node left, i.e. the final OR'd value of all vectors.
SDValue LHS = VecIns[Slot];
SDValue RHS = VecIns[Slot + 1];
VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
}
SDValue Res = DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
VecIns.back(), VecIns.back());
return getSETCC(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE, Res, DL, DAG);
}
/// Return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
++UI) {
SDNode *User = *UI;
unsigned UOpNo = UI.getOperandNo();
if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
// Look past the truncate.
UOpNo = User->use_begin().getOperandNo();
User = *User->use_begin();
}
if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
!(User->getOpcode() == ISD::SELECT && UOpNo == 0))
return true;
}
return false;
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
SelectionDAG &DAG) const {
// CF and OF aren't always set the way we want. Determine which
// of these we need.
bool NeedCF = false;
bool NeedOF = false;
switch (X86CC) {
default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
NeedCF = true;
break;
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
case X86::COND_O: case X86::COND_NO: {
// Check if we really need to set the Overflow flag. If NoSignedWrap is
// present, it is not actually needed.
switch (Op->getOpcode()) {
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
case ISD::SHL:
if (Op.getNode()->getFlags().hasNoSignedWrap())
break;
LLVM_FALLTHROUGH;
default:
NeedOF = true;
break;
}
break;
}
}
// See if we can use the EFLAGS value from the operand instead of
// doing a separate TEST. TEST always sets OF and CF to 0, so unless
// we prove that the arithmetic won't overflow, we can't use OF or CF.
if (Op.getResNo() != 0 || NeedOF || NeedCF) {
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
}
unsigned Opcode = 0;
unsigned NumOperands = 0;
// Truncate operations may prevent the merge of the SETCC instruction
// and the arithmetic instruction before it. Attempt to truncate the operands
// of the arithmetic instruction and use a reduced bit-width instruction.
bool NeedTruncation = false;
SDValue ArithOp = Op;
if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
SDValue Arith = Op->getOperand(0);
// Both the trunc and the arithmetic op need to have one user each.
if (Arith->hasOneUse())
switch (Arith.getOpcode()) {
default: break;
case ISD::ADD:
case ISD::SUB:
case ISD::AND:
case ISD::OR:
case ISD::XOR: {
NeedTruncation = true;
ArithOp = Arith;
}
}
}
// Sometimes flags can be set either with an AND or with an SRL/SHL
// instruction. The SRL/SHL variant should be preferred for masks longer than
// this number of bits.
const int ShiftToAndMaxMaskWidth = 32;
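// For example, (srl x, 8) that is only compared against zero becomes
// (and x, 0xFFFFFF00) below, which can later be selected as a TEST.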
const bool ZeroCheck = (X86CC == X86::COND_E || X86CC == X86::COND_NE);
// NOTICE: In the code below, ArithOp holds the arithmetic operation, which
// may sit behind a truncate (see NeedTruncation above), while 'Op' is the
// outer value that consumers actually see when we check for possible users.
switch (ArithOp.getOpcode()) {
case ISD::ADD:
// We only want to rewrite this as a target-specific node with attached
// flags if there is a reasonable chance of either using that to do custom
// instructions selection that can fold some of the memory operands, or if
// only the flags are used. If there are other uses, leave the node alone
// and emit a test instruction.
for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
UE = Op.getNode()->use_end(); UI != UE; ++UI)
if (UI->getOpcode() != ISD::CopyToReg &&
UI->getOpcode() != ISD::SETCC &&
UI->getOpcode() != ISD::STORE)
goto default_case;
if (auto *C = dyn_cast<ConstantSDNode>(ArithOp.getOperand(1))) {
// An add of one will be selected as an INC.
if (C->isOne() &&
(!Subtarget.slowIncDec() ||
DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::INC;
NumOperands = 1;
break;
}
// An add of negative one (subtract of one) will be selected as a DEC.
if (C->isAllOnesValue() &&
(!Subtarget.slowIncDec() ||
DAG.getMachineFunction().getFunction().optForSize())) {
Opcode = X86ISD::DEC;
NumOperands = 1;
break;
}
}
// Otherwise use a regular EFLAGS-setting add.
Opcode = X86ISD::ADD;
NumOperands = 2;
break;
case ISD::SHL:
case ISD::SRL:
// If we have a constant logical shift that's only used in a comparison
// against zero turn it into an equivalent AND. This allows turning it into
// a TEST instruction later.
if (ZeroCheck && Op->hasOneUse() &&
isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
EVT VT = Op.getValueType();
unsigned BitWidth = VT.getSizeInBits();
unsigned ShAmt = Op->getConstantOperandVal(1);
if (ShAmt >= BitWidth) // Avoid undefined shifts.
break;
APInt Mask = ArithOp.getOpcode() == ISD::SRL
? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
: APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
if (!Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
break;
Op = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
DAG.getConstant(Mask, dl, VT));
}
break;
case ISD::AND:
// If the primary 'and' result isn't used, don't bother using X86ISD::AND,
// because a TEST instruction will be better. However, AND should be
// preferred if the instruction can be combined into ANDN.
if (!hasNonFlagsUse(Op)) {
SDValue Op0 = ArithOp->getOperand(0);
SDValue Op1 = ArithOp->getOperand(1);
EVT VT = ArithOp.getValueType();
bool isAndn = isBitwiseNot(Op0) || isBitwiseNot(Op1);
bool isLegalAndnType = VT == MVT::i32 || VT == MVT::i64;
bool isProperAndn = isAndn && isLegalAndnType && Subtarget.hasBMI();
// If we cannot select an ANDN instruction, check if we can replace
// AND+IMM64 with a shift before giving up. This is possible for masks
// like 0xFF000000 or 0x00FFFFFF and if we care only about the zero flag.
if (!isProperAndn) {
if (!ZeroCheck)
break;
assert(!isa<ConstantSDNode>(Op0) && "AND node isn't canonicalized");
auto *CN = dyn_cast<ConstantSDNode>(Op1);
if (!CN)
break;
const APInt &Mask = CN->getAPIntValue();
if (Mask.isSignedIntN(ShiftToAndMaxMaskWidth))
break; // Prefer TEST instruction.
unsigned BitWidth = Mask.getBitWidth();
unsigned LeadingOnes = Mask.countLeadingOnes();
unsigned TrailingZeros = Mask.countTrailingZeros();
if (LeadingOnes + TrailingZeros == BitWidth) {
assert(TrailingZeros < VT.getSizeInBits() &&
"Shift amount should be less than the type width");
MVT ShTy = getScalarShiftAmountTy(DAG.getDataLayout(), VT);
SDValue ShAmt = DAG.getConstant(TrailingZeros, dl, ShTy);
Op = DAG.getNode(ISD::SRL, dl, VT, Op0, ShAmt);
break;
}
unsigned LeadingZeros = Mask.countLeadingZeros();
unsigned TrailingOnes = Mask.countTrailingOnes();
if (LeadingZeros + TrailingOnes == BitWidth) {
assert(LeadingZeros < VT.getSizeInBits() &&
"Shift amount should be less than the type width");
MVT ShTy = getScalarShiftAmountTy(DAG.getDataLayout(), VT);
SDValue ShAmt = DAG.getConstant(LeadingZeros, dl, ShTy);
Op = DAG.getNode(ISD::SHL, dl, VT, Op0, ShAmt);
break;
}
break;
}
}
LLVM_FALLTHROUGH;
case ISD::SUB:
case ISD::OR:
case ISD::XOR:
// Similar to ISD::ADD above, check if the uses will preclude useful
// lowering of the target-specific node.
for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
UE = Op.getNode()->use_end(); UI != UE; ++UI)
if (UI->getOpcode() != ISD::CopyToReg &&
UI->getOpcode() != ISD::SETCC &&
UI->getOpcode() != ISD::STORE)
goto default_case;
// Otherwise use a regular EFLAGS-setting instruction.
switch (ArithOp.getOpcode()) {
default: llvm_unreachable("unexpected operator!");
case ISD::SUB: Opcode = X86ISD::SUB; break;
case ISD::XOR: Opcode = X86ISD::XOR; break;
case ISD::AND: Opcode = X86ISD::AND; break;
case ISD::OR: Opcode = X86ISD::OR; break;
}
NumOperands = 2;
break;
case X86ISD::ADD:
case X86ISD::SUB:
case X86ISD::INC:
case X86ISD::DEC:
case X86ISD::OR:
case X86ISD::XOR:
case X86ISD::AND:
return SDValue(Op.getNode(), 1);
default:
default_case:
break;
}
// If we found that truncation is beneficial, perform the truncation and
// update 'Op'.
if (NeedTruncation) {
EVT VT = Op.getValueType();
SDValue WideVal = Op->getOperand(0);
EVT WideVT = WideVal.getValueType();
unsigned ConvertedOp = 0;
// Use a target machine opcode to prevent further DAGCombine
// optimizations that may separate the arithmetic operations
// from the setcc node.
switch (WideVal.getOpcode()) {
default: break;
case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
case ISD::AND: ConvertedOp = X86ISD::AND; break;
case ISD::OR: ConvertedOp = X86ISD::OR; break;
case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
}
if (ConvertedOp) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
Op = DAG.getNode(ConvertedOp, dl, VTs, V0, V1);
}
}
}
if (Opcode == 0) {
// Emit a CMP with 0, which is the TEST pattern.
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
DAG.getConstant(0, dl, Op.getValueType()));
}
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
return SDValue(New.getNode(), 1);
}
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
const SDLoc &dl, SelectionDAG &DAG) const {
if (isNullConstant(Op1))
return EmitTest(Op0, X86CC, dl, DAG);
assert(!(isa<ConstantSDNode>(Op1) && Op0.getValueType() == MVT::i1) &&
"Unexpected comparison operation for MVT::i1 operands");
if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
// Only promote the compare up to i32 if it is a 16-bit operation
// with an immediate; 16-bit immediates are to be avoided.
if ((Op0.getValueType() == MVT::i16 &&
(isa<ConstantSDNode>(Op0) || isa<ConstantSDNode>(Op1))) &&
!DAG.getMachineFunction().getFunction().optForMinSize() &&
!Subtarget.isAtom()) {
unsigned ExtendOp =
isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
}
// Use SUB instead of CMP to enable CSE between SUB and CMP.
SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
return SDValue(Sub.getNode(), 1);
}
return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}
/// Convert a comparison if required by the subtarget.
SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
SelectionDAG &DAG) const {
// If the subtarget does not support the FUCOMI instruction, floating-point
// comparisons have to be converted.
if (Subtarget.hasCMov() ||
Cmp.getOpcode() != X86ISD::CMP ||
!Cmp.getOperand(0).getValueType().isFloatingPoint() ||
!Cmp.getOperand(1).getValueType().isFloatingPoint())
return Cmp;
// The instruction selector will select an FUCOM instruction instead of
// FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
// build an SDNode sequence that transfers the result from FPSW into EFLAGS:
// (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
SDLoc dl(Cmp);
SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
DAG.getConstant(8, dl, MVT::i8));
SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
// Some 64-bit targets lack SAHF support, but they do support FCOMI.
assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
/// Check if replacement of SQRT with RSQRT should be disabled.
bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
EVT VT = Op.getValueType();
// We never want to use both SQRT and RSQRT instructions for the same input.
if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
return false;
if (VT.isVector())
return Subtarget.hasFastVectorFSQRT();
return Subtarget.hasFastScalarFSQRT();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
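/// One Newton-Raphson step refines an estimate E of 1/sqrt(A) as
/// E' = E * (1.5 - 0.5 * A * E * E).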
SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
SelectionDAG &DAG, int Enabled,
int &RefinementSteps,
bool &UseOneConstNR,
bool Reciprocal) const {
EVT VT = Op.getValueType();
// SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
// It is likely not profitable to do this for f64 because a double-precision
// rsqrt estimate with refinement on x86 prior to FMA requires at least 16
// instructions: convert to single, rsqrtss, convert back to double, refine
// (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
// TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
// after legalize types.
if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
(VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
(VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
(VT == MVT::v8f32 && Subtarget.hasAVX()) ||
(VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
if (RefinementSteps == ReciprocalEstimate::Unspecified)
RefinementSteps = 1;
UseOneConstNR = false;
// There is no FRSQRT for 512-bit vectors, but there is RSQRT14.
unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
}
return SDValue();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
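/// One Newton-Raphson step refines an estimate E of 1/A as
/// E' = E * (2.0 - A * E).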
SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
int Enabled,
int &RefinementSteps) const {
EVT VT = Op.getValueType();
// SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
// It is likely not profitable to do this for f64 because a double-precision
// reciprocal estimate with refinement on x86 prior to FMA requires
// 15 instructions: convert to single, rcpss, convert back to double, refine
// (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
// along with FMA, this could be a throughput win.
if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
(VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
(VT == MVT::v8f32 && Subtarget.hasAVX()) ||
(VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
// Enable estimate codegen with 1 refinement step for vector division.
// Scalar division estimates are disabled because they break too much
// real-world code. These defaults are intended to match GCC behavior.
if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
return SDValue();
if (RefinementSteps == ReciprocalEstimate::Unspecified)
RefinementSteps = 1;
// There is no FRCP for 512-bit vectors, but there is RCP14.
unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
}
return SDValue();
}
/// If we have at least two divisions that use the same divisor, convert to
/// multiplication by a reciprocal. This may need to be adjusted for a given
/// CPU if a division's cost is not at least twice the cost of a multiplication.
/// This is because we still need one division to calculate the reciprocal and
/// then we need two multiplies by that reciprocal as replacements for the
/// original divisions.
unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
return 2;
}
/// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition
/// according to equal/not-equal condition code \p CC.
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG) {
// If Src is i8, promote it to i32 with any_extend. There is no i8 BT
// instruction. Since the shift amount is in-range-or-undefined, we know
// that doing a bittest on the i32 value is ok. We extend to i32 because
// the encoding for the i16 version is larger than the i32 version.
// Also promote i16 to i32 for performance / code size reasons.
if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
// See if we can use the 32-bit instruction instead of the 64-bit one for a
// shorter encoding. Since the former takes BitNo modulo 32 and the latter
// modulo 64, this is only valid if bit 5 of BitNo is known to be zero.
if (Src.getValueType() == MVT::i64 &&
DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
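// E.g. for BitNo == 33, BT32 would test bit 1 while BT64 tests bit 33,
// so the truncation above requires bit 5 of BitNo to be known zero.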
// If the operand types disagree, extend the shift amount to match. Since
// BT ignores high bits (like shifts) we can use anyextend.
if (Src.getValueType() != BitNo.getValueType())
BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
return getSETCC(Cond, BT, dl , DAG);
}
/// Result of 'and' is compared against zero. Change to a BT node if possible.
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG) {
assert(And.getOpcode() == ISD::AND && "Expected AND node!");
SDValue Op0 = And.getOperand(0);
SDValue Op1 = And.getOperand(1);
if (Op0.getOpcode() == ISD::TRUNCATE)
Op0 = Op0.getOperand(0);
if (Op1.getOpcode() == ISD::TRUNCATE)
Op1 = Op1.getOperand(0);
SDValue LHS, RHS;
if (Op1.getOpcode() == ISD::SHL)
std::swap(Op0, Op1);
if (Op0.getOpcode() == ISD::SHL) {
if (isOneConstant(Op0.getOperand(0))) {
// If we looked past a truncate, check that it's only truncating away
// known zeros.
unsigned BitWidth = Op0.getValueSizeInBits();
unsigned AndBitWidth = And.getValueSizeInBits();
if (BitWidth > AndBitWidth) {
KnownBits Known;
DAG.computeKnownBits(Op0, Known);
if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
return SDValue();
}
LHS = Op1;
RHS = Op0.getOperand(1);
}
} else if (Op1.getOpcode() == ISD::Constant) {
ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
uint64_t AndRHSVal = AndRHS->getZExtValue();
SDValue AndLHS = Op0;
if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
LHS = AndLHS.getOperand(0);
RHS = AndLHS.getOperand(1);
} else {
// Use BT if the immediate can't be encoded in a TEST instruction or we
// are optimizing for size and the immediate won't fit in a byte.
bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
isPowerOf2_64(AndRHSVal)) {
LHS = AndLHS;
RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType());
}
}
}
if (LHS.getNode())
return getBitTestCondition(LHS, RHS, CC, dl, DAG);
return SDValue();
}
/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
/// CMPs.
static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
SDValue &Op1) {
unsigned SSECC;
bool Swap = false;
// SSE Condition code mapping:
// 0 - EQ
// 1 - LT
// 2 - LE
// 3 - UNORD
// 4 - NEQ
// 5 - NLT
// 6 - NLE
// 7 - ORD
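// For example, SETGT has no direct SSE encoding: the operands are swapped
// and LT (SSECC = 1) is used instead, i.e. a > b becomes b < a.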
switch (SetCCOpcode) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETOEQ:
case ISD::SETEQ: SSECC = 0; break;
case ISD::SETOGT:
case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETLT:
case ISD::SETOLT: SSECC = 1; break;
case ISD::SETOGE:
case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETLE:
case ISD::SETOLE: SSECC = 2; break;
case ISD::SETUO: SSECC = 3; break;
case ISD::SETUNE:
case ISD::SETNE: SSECC = 4; break;
case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGE: SSECC = 5; break;
case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
case ISD::SETUGT: SSECC = 6; break;
case ISD::SETO: SSECC = 7; break;
case ISD::SETUEQ: SSECC = 8; break;
case ISD::SETONE: SSECC = 12; break;
}
if (Swap)
std::swap(Op0, Op1);
return SSECC;
}
/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
/// concatenate the result back.
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
SDValue CC = Op.getOperand(2);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
// Issue the operation on the smaller types and concatenate the result back
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}
static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
assert(VT.getVectorElementType() == MVT::i1 &&
"Cannot set masked compare for this operation");
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
// If this is a seteq make sure any build vectors of all zeros are on the RHS.
// This helps with vptestm matching.
// TODO: Should we just canonicalize the setcc during DAG combine?
if ((SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE) &&
ISD::isBuildVectorAllZeros(Op0.getNode()))
std::swap(Op0, Op1);
// Prefer SETGT over SETLT.
if (SetCCOpcode == ISD::SETLT) {
SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
std::swap(Op0, Op1);
}
return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
}
/// Try to turn a VSETULT into a VSETULE by modifying its second
/// operand \p Op1. If non-trivial (for example because it's not constant)
/// return an empty value.
static SDValue ChangeVSETULTtoVSETULE(const SDLoc &dl, SDValue Op1,
SelectionDAG &DAG) {
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
if (!BV)
return SDValue();
MVT VT = Op1.getSimpleValueType();
MVT EVT = VT.getVectorElementType();
unsigned n = VT.getVectorNumElements();
SmallVector<SDValue, 8> ULTOp1;
for (unsigned i = 0; i < n; ++i) {
ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EVT)
return SDValue();
// Avoid underflow.
APInt Val = Elt->getAPIntValue();
if (Val == 0)
return SDValue();
ULTOp1.push_back(DAG.getConstant(Val - 1, dl, EVT));
}
return DAG.getBuildVector(VT, dl, ULTOp1);
}
/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
/// Op0 u<= Op1:
/// t = psubus Op0, Op1
/// pcmpeq t, <0..0>
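/// E.g. with i8 elements, psubus(5, 7) == 0 so the pcmpeq yields all-ones
/// (5 u<= 7 holds), while psubus(9, 7) == 2 yields all-zeros.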
static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
ISD::CondCode Cond, const SDLoc &dl,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (!Subtarget.hasSSE2())
return SDValue();
MVT VET = VT.getVectorElementType();
if (VET != MVT::i8 && VET != MVT::i16)
return SDValue();
switch (Cond) {
default:
return SDValue();
case ISD::SETULT: {
// If the comparison is against a constant, we can turn this into a
// setule. With psubus, setule does not require a swap. This is
// beneficial because the constant in the register is no longer
// clobbered as the destination, so it can be hoisted out of a loop.
// Only do this pre-AVX, since vpcmp* is no longer destructive.
if (Subtarget.hasAVX())
return SDValue();
SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
if (!ULEOp1)
return SDValue();
Op1 = ULEOp1;
break;
}
// Psubus is better than flip-sign because it requires no inversion.
case ISD::SETUGE:
std::swap(Op0, Op1);
break;
case ISD::SETULE:
break;
}
SDValue Result = DAG.getNode(X86ISD::SUBUS, dl, VT, Op0, Op1);
return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
getZeroVector(VT, Subtarget, DAG, dl));
}
static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
MVT VT = Op.getSimpleValueType();
ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
SDLoc dl(Op);
if (isFP) {
#ifndef NDEBUG
MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif
unsigned Opc;
if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
assert(VT.getVectorNumElements() <= 16);
Opc = X86ISD::CMPM;
} else {
Opc = X86ISD::CMPP;
// The SSE/AVX packed FP comparison nodes are defined with a
// floating-point vector result that matches the operand type. This allows
// them to work with an SSE1 target (integer vector types are not legal).
VT = Op0.getSimpleValueType();
}
// In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
// emit two comparisons and a logic op to tie them together.
SDValue Cmp;
unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
if (SSECC >= 8 && !Subtarget.hasAVX()) {
// LLVM predicate is SETUEQ or SETONE.
unsigned CC0, CC1;
unsigned CombineOpc;
if (Cond == ISD::SETUEQ) {
CC0 = 3; // UNORD
CC1 = 0; // EQ
CombineOpc = X86ISD::FOR;
} else {
assert(Cond == ISD::SETONE);
CC0 = 7; // ORD
CC1 = 4; // NEQ
CombineOpc = X86ISD::FAND;
}
SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC0, dl, MVT::i8));
SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CC1, dl, MVT::i8));
Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
} else {
// Handle all other FP comparisons here.
Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(SSECC, dl, MVT::i8));
}
// If this is SSE/AVX CMPP, bitcast the result back to integer to match the
// result type of SETCC. The bitcast is expected to be optimized away
// during combining/isel.
if (Opc == X86ISD::CMPP)
Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
return Cmp;
}
MVT VTOp0 = Op0.getSimpleValueType();
assert(VTOp0 == Op1.getSimpleValueType() &&
"Expected operands with same type!");
assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
"Invalid number of packed elements for source and destination!");
// This is being called by type legalization because v2i32 is marked custom
// for result type legalization for v2f32.
if (VTOp0 == MVT::v2i32)
return SDValue();
// The non-AVX512 code below works under the assumption that source and
// destination types are the same.
assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
"Value types for source and destination must be the same!");
// Break 256-bit integer vector compare into smaller ones.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntVSETCC(Op, DAG);
// The result is boolean, but operands are int/float
if (VT.getVectorElementType() == MVT::i1) {
// In the AVX-512 architecture setcc returns a mask with i1 elements,
// but there is no compare instruction for i8 and i16 elements in KNL.
assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
"Unexpected operand type");
return LowerIntVSETCC_AVX512(Op, DAG);
}
// Lower using XOP integer comparisons.
if (VT.is128BitVector() && Subtarget.hasXOP()) {
// Translate compare code to XOP PCOM compare mode.
unsigned CmpMode = 0;
switch (Cond) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETULT:
case ISD::SETLT: CmpMode = 0x00; break;
case ISD::SETULE:
case ISD::SETLE: CmpMode = 0x01; break;
case ISD::SETUGT:
case ISD::SETGT: CmpMode = 0x02; break;
case ISD::SETUGE:
case ISD::SETGE: CmpMode = 0x03; break;
case ISD::SETEQ: CmpMode = 0x04; break;
case ISD::SETNE: CmpMode = 0x05; break;
}
// Are we comparing unsigned or signed integers?
unsigned Opc =
ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
return DAG.getNode(Opc, dl, VT, Op0, Op1,
DAG.getConstant(CmpMode, dl, MVT::i8));
}
// (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
// Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
SDValue BC0 = peekThroughBitcasts(Op0);
if (BC0.getOpcode() == ISD::AND) {
APInt UndefElts;
SmallVector<APInt, 64> EltBits;
if (getTargetConstantBitsFromNode(BC0.getOperand(1),
VT.getScalarSizeInBits(), UndefElts,
EltBits, false, false)) {
if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
Cond = ISD::SETEQ;
Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
}
}
}
}
// If this is a SETNE against the signed minimum value, change it to SETGT.
// If this is a SETNE against the signed maximum value, change it to SETLT,
// which will be swapped to SETGT.
// Otherwise we use PCMPEQ+invert.
APInt ConstValue;
if (Cond == ISD::SETNE &&
ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
if (ConstValue.isMinSignedValue())
Cond = ISD::SETGT;
else if (ConstValue.isMaxSignedValue())
Cond = ISD::SETLT;
}
// If both operands are known non-negative, then an unsigned compare is the
// same as a signed compare and there's no need to flip signbits.
// TODO: We could check for more general simplifications here since we're
// computing known bits.
bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
!(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
// Special case: Use min/max operations for unsigned compares. We only want
// to do this for unsigned compares if we need to flip signs or if it allows
// us to avoid an invert.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (ISD::isUnsignedIntSetCC(Cond) &&
(FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
TLI.isOperationLegal(ISD::UMIN, VT)) {
bool Invert = false;
unsigned Opc;
switch (Cond) {
default: llvm_unreachable("Unexpected condition code");
case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
case ISD::SETULE: Opc = ISD::UMIN; break;
case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
case ISD::SETUGE: Opc = ISD::UMAX; break;
}
SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
// If the logical-not of the result is required, perform that now.
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
return Result;
}
// Try to use SUBUS and PCMPEQ.
if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
return V;
// We are handling one of the integer comparisons here. Since SSE only has
// GT and EQ comparisons for integer, swapping operands and multiple
// operations may be required for some comparisons.
unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
: X86ISD::PCMPGT;
bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
Cond == ISD::SETGE || Cond == ISD::SETUGE;
bool Invert = Cond == ISD::SETNE ||
(Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
if (Swap)
std::swap(Op0, Op1);
// Check that the operation in question is available (most are plain SSE2,
// but PCMPGTQ and PCMPEQQ have different requirements).
if (VT == MVT::v2i64) {
if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
assert(Subtarget.hasSSE2() && "Don't know how to lower!");
// First cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
Op1 = DAG.getBitcast(MVT::v4i32, Op1);
// Since SSE has no unsigned integer comparisons, we need to flip the sign
// bits of the inputs before performing those operations. The lower
// compare is always unsigned.
SDValue SB;
if (FlipSigns) {
SB = DAG.getConstant(0x80000000U, dl, MVT::v4i32);
} else {
SDValue Sign = DAG.getConstant(0x80000000U, dl, MVT::i32);
SDValue Zero = DAG.getConstant(0x00000000U, dl, MVT::i32);
SB = DAG.getBuildVector(MVT::v4i32, dl, {Sign, Zero, Sign, Zero});
}
Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
// Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
// Create masks selecting only the low parts / high parts of the 64-bit integers.
static const int MaskHi[] = { 1, 1, 3, 3 };
static const int MaskLo[] = { 0, 0, 2, 2 };
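// E.g. MaskHi = {1,1,3,3} broadcasts each qword's high dword into both of
// its dword slots, so EQHi/GTHi hold per-qword high-part compare results.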
SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
if (Invert)
Result = DAG.getNOT(dl, Result, MVT::v4i32);
return DAG.getBitcast(VT, Result);
}
if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
// If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
// pcmpeqd + pshufd + pand.
assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
// First cast everything to the right type.
Op0 = DAG.getBitcast(MVT::v4i32, Op0);
Op1 = DAG.getBitcast(MVT::v4i32, Op1);
// Do the compare.
SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
// Make sure the lower and upper halves are both all-ones.
static const int Mask[] = { 1, 0, 3, 2 };
SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
if (Invert)
Result = DAG.getNOT(dl, Result, MVT::v4i32);
return DAG.getBitcast(VT, Result);
}
}
// Since SSE has no unsigned integer comparisons, we need to flip the sign
// bits of the inputs before performing those operations.
if (FlipSigns) {
MVT EltVT = VT.getVectorElementType();
SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
VT);
Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
}
SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
// If the logical-not of the result is required, perform that now.
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
return Result;
}
// Try to select this as a KTEST+SETCC if possible.
static SDValue EmitKTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
const SDLoc &dl, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Only support equality comparisons.
if (CC != ISD::SETEQ && CC != ISD::SETNE)
return SDValue();
// Must be a bitcast from vXi1.
if (Op0.getOpcode() != ISD::BITCAST)
return SDValue();
Op0 = Op0.getOperand(0);
MVT VT = Op0.getSimpleValueType();
if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
!(Subtarget.hasDQI() && VT == MVT::v8i1) &&
!(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
return SDValue();
X86::CondCode X86CC;
if (isNullConstant(Op1)) {
X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
} else if (isAllOnesConstant(Op1)) {
// C flag is set for all ones.
X86CC = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
} else
return SDValue();
// If the input is an OR, we can combine its operands into the KORTEST.
SDValue LHS = Op0;
SDValue RHS = Op0;
if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
LHS = Op0.getOperand(0);
RHS = Op0.getOperand(1);
}
SDValue KORTEST = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
return getSETCC(X86CC, KORTEST, dl, DAG);
}
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
MVT VT = Op.getSimpleValueType();
if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDLoc dl(Op);
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
// Optimize to BT if possible.
// Lower (X & (1 << N)) == 0 to BT(X, N).
// Lower ((X >>u N) & 1) != 0 to BT(X, N).
// Lower ((X >>s N) & 1) != 0 to BT(X, N).
if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
if (SDValue NewSetCC = LowerAndToBT(Op0, CC, dl, DAG))
return NewSetCC;
}
// Try to use PTEST for a tree of ORs equality-compared with 0.
// TODO: We could do AND tree with all 1s as well by using the C flag.
if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
if (SDValue NewSetCC = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG))
return NewSetCC;
}
// Try to lower using KTEST.
if (SDValue NewSetCC = EmitKTEST(Op0, Op1, CC, dl, DAG, Subtarget))
return NewSetCC;
// Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
// these.
if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
(CC == ISD::SETEQ || CC == ISD::SETNE)) {
// If the input is a setcc, then reuse the input setcc or use a new one with
// the inverted condition.
if (Op0.getOpcode() == X86ISD::SETCC) {
X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
if (!Invert)
return Op0;
CCode = X86::GetOppositeBranchCondition(CCode);
return getSETCC(CCode, Op0.getOperand(1), dl, DAG);
}
}
bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
X86::CondCode X86CC = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
if (X86CC == X86::COND_INVALID)
return SDValue();
SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
return getSETCC(X86CC, EFLAGS, dl, DAG);
}
SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
SDValue Carry = Op.getOperand(2);
SDValue Cond = Op.getOperand(3);
SDLoc DL(Op);
assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
// Recreate the carry if needed.
EVT CarryVT = Carry.getValueType();
APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
Carry, DAG.getConstant(NegOne, DL, CarryVT));
SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
return getSETCC(CC, Cmp.getValue(1), DL, DAG);
}
/// Return true if opcode is a X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
unsigned Opc = Op.getOpcode();
if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
Opc == X86ISD::SAHF)
return true;
if (Op.getResNo() == 1 &&
(Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
Opc == X86ISD::SBB || Opc == X86ISD::SMUL ||
Opc == X86ISD::INC || Opc == X86ISD::DEC || Opc == X86ISD::OR ||
Opc == X86ISD::XOR || Opc == X86ISD::AND))
return true;
if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
return true;
return false;
}
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
if (V.getOpcode() != ISD::TRUNCATE)
return false;
SDValue VOp0 = V.getOperand(0);
unsigned InBits = VOp0.getValueSizeInBits();
unsigned Bits = V.getValueSizeInBits();
return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}
SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
bool AddTest = true;
SDValue Cond = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue Op2 = Op.getOperand(2);
SDLoc DL(Op);
MVT VT = Op1.getSimpleValueType();
SDValue CC;
// Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
// are available or VBLENDV if AVX is available.
// Otherwise FP cmovs get lowered into a less efficient branch sequence later.
if (Cond.getOpcode() == ISD::SETCC &&
((Subtarget.hasSSE2() && VT == MVT::f64) ||
(Subtarget.hasSSE1() && VT == MVT::f32)) &&
VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
unsigned SSECC = translateX86FSETCC(
cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
if (Subtarget.hasAVX512()) {
SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0,
CondOp1, DAG.getConstant(SSECC, DL, MVT::i8));
assert(!VT.isVector() && "Not a scalar type?");
return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
}
if (SSECC < 8 || Subtarget.hasAVX()) {
SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
DAG.getConstant(SSECC, DL, MVT::i8));
// If we have AVX, we can use a variable vector select (VBLENDV) instead
// of 3 logic instructions for size savings and potentially speed.
// Unfortunately, there is no scalar form of VBLENDV.
// If either operand is a constant, don't try this. We can expect to
// optimize away at least one of the logic instructions later in that
// case, so that sequence would be faster than a variable blend.
// BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
// uses XMM0 as the selection register. That may need just as many
// instructions as the AND/ANDN/OR sequence due to register moves, so
// don't bother.
if (Subtarget.hasAVX() &&
!isa<ConstantFPSDNode>(Op1) && !isa<ConstantFPSDNode>(Op2)) {
// Convert to vectors, do a VSELECT, and convert back to scalar.
// All of the conversions should be optimized away.
MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
VCmp = DAG.getBitcast(VCmpVT, VCmp);
SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
VSel, DAG.getIntPtrConstant(0, DL));
}
SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
}
}
// AVX512 fallback is to lower selects of scalar floats to masked moves.
if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
}
// For v64i1 without 64-bit support we need to split and rejoin.
if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
assert(Subtarget.hasBWI() && "Expected BWI to be legal");
SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
SDValue Op1Scalar;
if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
Op1Scalar = Op1.getOperand(0);
SDValue Op2Scalar;
if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
Op2Scalar = Op2.getOperand(0);
if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
Op1Scalar, Op2Scalar);
if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
return DAG.getBitcast(VT, newSelect);
SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
DAG.getIntPtrConstant(0, DL));
}
}
if (VT == MVT::v4i1 || VT == MVT::v2i1) {
SDValue zeroConst = DAG.getIntPtrConstant(0, DL);
Op1 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
DAG.getUNDEF(MVT::v8i1), Op1, zeroConst);
Op2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
DAG.getUNDEF(MVT::v8i1), Op2, zeroConst);
SDValue newSelect = DAG.getSelect(DL, MVT::v8i1, Cond, Op1, Op2);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, newSelect, zeroConst);
}
if (Cond.getOpcode() == ISD::SETCC) {
if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
Cond = NewCond;
// If the condition was updated, it's possible that the operands of the
// select were also updated (for example, EmitTest has a RAUW). Refresh
// the local references to the select operands in case they got stale.
Op1 = Op.getOperand(1);
Op2 = Op.getOperand(2);
}
}
// (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
// (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
// (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
// (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
// (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
// (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
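// The lowerings below rely on "cmp x, 1" setting CF exactly when x == 0, so
// SETCC_CARRY (an sbb of a register with itself) yields all-ones or zero.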
if (Cond.getOpcode() == X86ISD::SETCC &&
Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
isNullConstant(Cond.getOperand(1).getOperand(1))) {
SDValue Cmp = Cond.getOperand(1);
unsigned CondCode =
cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
(CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
SDValue CmpOp0 = Cmp.getOperand(0);
// Apply further optimizations for special cases
// (select (x != 0), -1, 0) -> neg & sbb
// (select (x == 0), 0, -1) -> neg & sbb
if (isNullConstant(Y) &&
(isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs, Zero, CmpOp0);
SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
DAG.getConstant(X86::COND_B, DL, MVT::i8),
SDValue(Neg.getNode(), 1));
return Res;
}
Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
SDValue Res = // Res = 0 or -1.
DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp);
if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
Res = DAG.getNOT(DL, Res, Res.getValueType());
if (!isNullConstant(Op2))
Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
return Res;
} else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
Cmp.getOperand(0).getOpcode() == ISD::AND &&
isOneConstant(Cmp.getOperand(0).getOperand(1))) {
SDValue CmpOp0 = Cmp.getOperand(0);
SDValue Src1, Src2;
// Returns true if Op2 is an XOR or OR operator and one of its operands
// equals Op1, i.e.:
// ( a , a op b) || ( b , a op b)
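// e.g. for (select ((and x, 1) == 0), y, (z ^ y)), Op1 is y and Op2 is
// (z ^ y), so the lambda binds Src1 = z and Src2 = y.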
auto isOrXorPattern = [&]() {
if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
(Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
Src1 =
Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
Src2 = Op1;
return true;
}
return false;
};
if (isOrXorPattern()) {
SDValue Neg;
unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
// We need a mask of all zeros or all ones with the same size as the
// other operands.
if (CmpSz > VT.getSizeInBits())
Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
else if (CmpSz < VT.getSizeInBits())
Neg = DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
DAG.getConstant(1, DL, VT));
else
Neg = CmpOp0;
SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
Neg); // -(and (x, 0x1))
SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // (Mask & z) op y
}
}
}
// Look past (and (setcc_carry (cmp ...)), 1).
if (Cond.getOpcode() == ISD::AND &&
Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
isOneConstant(Cond.getOperand(1)))
Cond = Cond.getOperand(0);
// If the condition flag is set by an X86ISD::CMP, then use it as the condition
// setting operand in place of the X86ISD::SETCC.
unsigned CondOpcode = Cond.getOpcode();
if (CondOpcode == X86ISD::SETCC ||
CondOpcode == X86ISD::SETCC_CARRY) {
CC = Cond.getOperand(0);
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
MVT VT = Op.getSimpleValueType();
bool IllegalFPCMov = false;
if (VT.isFloatingPoint() && !VT.isVector() &&
!isScalarFPTypeInSSEReg(VT)) // FPStack?
IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
Opc == X86ISD::BT) { // FIXME
Cond = Cmp;
AddTest = false;
}
} else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
Cond.getOperand(0).getValueType() != MVT::i8)) {
SDValue LHS = Cond.getOperand(0);
SDValue RHS = Cond.getOperand(1);
unsigned X86Opcode;
unsigned X86Cond;
SDVTList VTs;
switch (CondOpcode) {
case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
default: llvm_unreachable("unexpected overflowing operator");
}
if (CondOpcode == ISD::UMULO)
VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
MVT::i32);
else
VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
if (CondOpcode == ISD::UMULO)
Cond = X86Op.getValue(2);
else
Cond = X86Op.getValue(1);
CC = DAG.getConstant(X86Cond, DL, MVT::i8);
AddTest = false;
}
if (AddTest) {
// Look past the truncate if the high bits are known zero.
if (isTruncWithZeroHighBitsInput(Cond, DAG))
Cond = Cond.getOperand(0);
// We know the result of AND is compared against zero. Try to match
// it to BT.
if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
if (SDValue NewSetCC = LowerAndToBT(Cond, ISD::SETNE, DL, DAG)) {
CC = NewSetCC.getOperand(0);
Cond = NewSetCC.getOperand(1);
AddTest = false;
}
}
}
if (AddTest) {
CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
}
// a < b ? -1 : 0 -> RES = ~setcc_carry
// a < b ? 0 : -1 -> RES = setcc_carry
// a >= b ? -1 : 0 -> RES = setcc_carry
// a >= b ? 0 : -1 -> RES = ~setcc_carry
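// SETCC_CARRY materializes "sbb reg, reg": all-ones when CF is set and zero
// otherwise, so each pattern above costs at most one extra NOT.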
if (Cond.getOpcode() == X86ISD::SUB) {
Cond = ConvertCmpIfNecessary(Cond, DAG);
unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
(isNullConstant(Op1) || isNullConstant(Op2))) {
SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
DAG.getConstant(X86::COND_B, DL, MVT::i8),
Cond);
if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
return DAG.getNOT(DL, Res, Res.getValueType());
return Res;
}
}
// X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
// widen the cmov and push the truncate through. This avoids introducing a new
// branch during isel and doesn't add any extensions.
if (Op.getValueType() == MVT::i8 &&
Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
if (T1.getValueType() == T2.getValueType() &&
// Blacklist CopyFromReg to avoid partial register stalls.
T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
CC, Cond);
return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
}
}
// Promote i16 cmovs if it won't prevent folding a load.
if (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) && !MayFoldLoad(Op2)) {
Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
SDValue Ops[] = { Op2, Op1, CC, Cond };
SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
}
// X86ISD::CMOV means set the result (which is operand 1) to the RHS if
// condition is true.
SDValue Ops[] = { Op2, Op1, CC, Cond };
return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
}
static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
MVT VTElt = VT.getVectorElementType();
SDLoc dl(Op);
unsigned NumElts = VT.getVectorNumElements();
// Extend VT if the element type is i8/i16 and BWI is not supported.
MVT ExtVT = VT;
if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
// If v16i32 is to be avoided, we'll need to split and concatenate.
if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
return SplitAndExtendv16i1(ISD::SIGN_EXTEND, VT, In, dl, DAG);
ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
}
// Widen to 512-bits if VLX is not supported.
MVT WideVT = ExtVT;
if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
NumElts *= 512 / ExtVT.getSizeInBits();
InVT = MVT::getVectorVT(MVT::i1, NumElts);
In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
In, DAG.getIntPtrConstant(0, dl));
WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
}
SDValue V;
MVT WideEltVT = WideVT.getVectorElementType();
if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
(Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
V = DAG.getNode(ISD::SIGN_EXTEND, dl, WideVT, In);
} else {
SDValue NegOne = getOnesVector(WideVT, DAG, dl);
SDValue Zero = getZeroVector(WideVT, Subtarget, DAG, dl);
V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
}
// Truncate if we had to extend i16/i8 above.
if (VT != ExtVT) {
WideVT = MVT::getVectorVT(VTElt, NumElts);
V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
}
// Extract back to 128/256-bit if we widened.
if (WideVT != VT)
V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
DAG.getIntPtrConstant(0, dl));
return V;
}
static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
if (InVT.getVectorElementType() == MVT::i1)
return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
assert(Subtarget.hasAVX() && "Expected AVX support");
return LowerAVXExtend(Op, DAG, Subtarget);
}
// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
// For sign extend this needs to handle all vector sizes and SSE4.1 and
// non-SSE4.1 targets. For zero extend this should only handle inputs of
// MVT::v64i8 when BWI is not supported, but AVX512 is.
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = Op->getOperand(0);
MVT VT = Op->getSimpleValueType(0);
MVT InVT = In.getSimpleValueType();
assert(VT.getSizeInBits() == InVT.getSizeInBits());
MVT SVT = VT.getVectorElementType();
MVT InSVT = InVT.getVectorElementType();
assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
return SDValue();
if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
return SDValue();
if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
!(VT.is256BitVector() && Subtarget.hasInt256()) &&
!(VT.is512BitVector() && Subtarget.hasAVX512()))
return SDValue();
SDLoc dl(Op);
// For 256-bit vectors, we only need the lower (128-bit) half of the input.
// For 512-bit vectors, we need 128-bits or 256-bits.
if (VT.getSizeInBits() > 128) {
// Input needs to be at least the same number of elements as output, and
// at least 128-bits.
int InSize = InSVT.getSizeInBits() * VT.getVectorNumElements();
In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
}
assert((Op.getOpcode() != ISD::ZERO_EXTEND_VECTOR_INREG ||
InVT == MVT::v64i8) && "Zero extend only for v64i8 input!");
// SSE41 targets can use the pmovsx* instructions directly for 128-bit results,
// so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions
// still need to be handled here for 256/512-bit results.
if (Subtarget.hasInt256()) {
assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
unsigned ExtOpc = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ?
X86ISD::VSEXT : X86ISD::VZEXT;
return DAG.getNode(ExtOpc, dl, VT, In);
}
// We should only get here for sign extend.
assert(Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
"Unexpected opcode!");
// Pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
SDValue Curr = In;
MVT CurrVT = InVT;
// As SRAI is only available on i16/i32 types, we expand only up to i32
// and handle i64 separately.
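// e.g. extending the low bytes of a v16i8 to v4i32: two UNPCKL rounds place
// each source byte in the top byte of an i32 lane, and VSRAI by 24 (= 32 - 8)
// then shifts it back down with sign fill.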
while (CurrVT != VT && CurrVT.getVectorElementType() != MVT::i32) {
Curr = DAG.getNode(X86ISD::UNPCKL, dl, CurrVT, DAG.getUNDEF(CurrVT), Curr);
MVT CurrSVT = MVT::getIntegerVT(CurrVT.getScalarSizeInBits() * 2);
CurrVT = MVT::getVectorVT(CurrSVT, CurrVT.getVectorNumElements() / 2);
Curr = DAG.getBitcast(CurrVT, Curr);
}
SDValue SignExt = Curr;
if (CurrVT != InVT) {
unsigned SignExtShift =
CurrVT.getScalarSizeInBits() - InSVT.getSizeInBits();
SignExt = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
DAG.getConstant(SignExtShift, dl, MVT::i8));
}
if (CurrVT == VT)
return SignExt;
if (VT == MVT::v2i64 && CurrVT == MVT::v4i32) {
SDValue Sign = DAG.getNode(X86ISD::VSRAI, dl, CurrVT, Curr,
DAG.getConstant(31, dl, MVT::i8));
SDValue Ext = DAG.getVectorShuffle(CurrVT, dl, SignExt, Sign, {0, 4, 1, 5});
return DAG.getBitcast(VT, Ext);
}
return SDValue();
}
static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op->getSimpleValueType(0);
SDValue In = Op->getOperand(0);
MVT InVT = In.getSimpleValueType();
SDLoc dl(Op);
if (InVT.getVectorElementType() == MVT::i1)
return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
assert(VT.isVector() && InVT.isVector() && "Expected vector type");
assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
"Expected same number of elements");
assert((VT.getVectorElementType() == MVT::i16 ||
VT.getVectorElementType() == MVT::i32 ||
VT.getVectorElementType() == MVT::i64) &&
"Unexpected element type");
assert((InVT.getVectorElementType() == MVT::i8 ||
InVT.getVectorElementType() == MVT::i16 ||
InVT.getVectorElementType() == MVT::i32) &&
"Unexpected element type");
if (Subtarget.hasInt256())
return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
// Optimize vectors in AVX mode: sign extend v8i16 to v8i32 and v4i32 to
// v4i64.
//
// Divide the input vector into two parts; for v4i32 the shuffle masks will
// be { 0, 1, -1, -1 } and { 2, 3, -1, -1 }. Use the vpmovsx instruction to
// extend v4i32 -> v2i64 and v8i16 -> v4i32, then concat the halves back to
// the original VT.
unsigned NumElems = InVT.getVectorNumElements();
SDValue Undef = DAG.getUNDEF(InVT);
SmallVector<int,8> ShufMask1(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask1[i] = i;
SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask1);
SmallVector<int,8> ShufMask2(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask2[i] = i + NumElems/2;
SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask2);
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
OpLo = DAG.getSignExtendVectorInReg(OpLo, dl, HalfVT);
OpHi = DAG.getSignExtendVectorInReg(OpHi, dl, HalfVT);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
SDLoc dl(St);
SDValue StoredVal = St->getValue();
// Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
assert(StoredVal.getValueType().isVector() &&
StoredVal.getValueType().getVectorElementType() == MVT::i1 &&
StoredVal.getValueType().getVectorNumElements() <= 8 &&
"Unexpected VT");
assert(!St->isTruncatingStore() && "Expected non-truncating store");
assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
"Expected AVX512F without AVX512DQI");
StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
DAG.getUNDEF(MVT::v8i1), StoredVal,
DAG.getIntPtrConstant(0, dl));
StoredVal = DAG.getBitcast(MVT::i8, StoredVal);
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
// Lower vector extended loads using a shuffle. If SSSE3 is not available we
// may emit an illegal shuffle but the expansion is still better than scalar
// code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
// we'll emit a shuffle and an arithmetic shift.
// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
// TODO: It is possible to support ZExt by zeroing the undef values during
// the shuffle phase or after the shuffle.
static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT RegVT = Op.getSimpleValueType();
assert(RegVT.isVector() && "We only custom lower vector sext loads.");
assert(RegVT.isInteger() &&
"We only custom lower integer vector sext loads.");
LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
SDLoc dl(Ld);
EVT MemVT = Ld->getMemoryVT();
// Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
if (RegVT.isVector() && RegVT.getVectorElementType() == MVT::i1) {
assert(EVT(RegVT) == MemVT && "Expected non-extending load");
assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
"Expected AVX512F without AVX512DQI");
SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(), Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
// Replace chain users with the new chain.
assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewLd.getValue(1));
SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
DAG.getBitcast(MVT::v8i1, NewLd),
DAG.getIntPtrConstant(0, dl));
return DAG.getMergeValues({Extract, NewLd.getValue(1)}, dl);
}
// Nothing useful we can do without SSE2 shuffles.
assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2.");
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned RegSz = RegVT.getSizeInBits();
ISD::LoadExtType Ext = Ld->getExtensionType();
assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
&& "Only anyext and sext are currently implemented.");
assert(MemVT != RegVT && "Cannot extend to the same type");
assert(MemVT.isVector() && "Must load a vector from memory");
unsigned NumElems = RegVT.getVectorNumElements();
unsigned MemSz = MemVT.getSizeInBits();
assert(RegSz > MemSz && "Register size must be greater than the mem size");
if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) {
// The only way in which we have a legal 256-bit vector result but not the
// integer 256-bit operations needed to directly lower a sextload is if we
// have AVX1 but not AVX2. In that case, we can always emit a sextload to
// a 128-bit vector and a normal sign_extend to 256-bits that should get
// correctly legalized. We do this late to allow the canonical form of
// sextload to persist throughout the rest of the DAG combiner -- it wants
// to fold together any extensions it can, and so will fuse a sign_extend
// of an sextload into a sextload targeting a wider value.
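// e.g. a sextload from v16i8 memory to a v16i16 register becomes a plain
// v16i8 load plus a v16i16 sign_extend, which legalization then splits.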
SDValue Load;
if (MemSz == 128) {
// Just switch this to a normal load.
assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
"it must be a legal 128-bit vector "
"type!");
Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(), Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
} else {
assert(MemSz < 128 &&
"Can't extend a type wider than 128 bits to a 256 bit vector!");
// Do an sext load to a 128-bit vector type. We want to use the same
// number of elements, but elements half as wide. This will end up being
// recursively lowered by this routine, but will succeed as we definitely
// have all the necessary features if we're using AVX1.
EVT HalfEltVT =
EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
Load =
DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
}
// Replace chain users with the new chain.
assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
// Finally, do a normal sign-extend to the desired register.
return DAG.getSExtOrTrunc(Load, dl, RegVT);
}
// All sizes must be a power of two.
assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
"Non-power-of-two elements are not custom lowered!");
// Attempt to load the original value using scalar loads.
// Find the largest scalar type that divides the total loaded size.
MVT SclrLoadTy = MVT::i8;
for (MVT Tp : MVT::integer_valuetypes()) {
if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
SclrLoadTy = Tp;
}
}
// On 32-bit systems we can't use 64-bit integer loads; try bitcasting to f64.
if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
(64 <= MemSz))
SclrLoadTy = MVT::f64;
// Calculate the number of scalar loads that we need to perform
// in order to load our vector from memory.
unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
"Can only lower sext loads with a single scalar load!");
unsigned loadRegSize = RegSz;
if (Ext == ISD::SEXTLOAD && RegSz >= 256)
loadRegSize = 128;
// If we don't have BWI we won't be able to create the shuffle needed for
// v8i8->v8i64.
if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
MemVT == MVT::v8i8)
loadRegSize = 128;
// Represent our vector as a sequence of elements which are the
// largest scalar that we can load.
EVT LoadUnitVecVT = EVT::getVectorVT(
*DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
// Represent the data using the same element type that is stored in
// memory. In practice, we "widen" MemVT.
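// An illustrative trace for an extload of v4i8 to v4i32: SclrLoadTy is i32
// (a single load), LoadUnitVecVT is v4i32, and WideVecVT is v16i8, which the
// shuffle below redistributes before the final bitcast.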
EVT WideVecVT =
EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
loadRegSize / MemVT.getScalarSizeInBits());
assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
"Invalid vector type");
// We can't shuffle using an illegal type.
assert(TLI.isTypeLegal(WideVecVT) &&
"We only lower types that form legal widened vector types");
SmallVector<SDValue, 8> Chains;
SDValue Ptr = Ld->getBasePtr();
SDValue Increment = DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, dl,
TLI.getPointerTy(DAG.getDataLayout()));
SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
for (unsigned i = 0; i < NumLoads; ++i) {
// Perform a single load.
SDValue ScalarLoad =
DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
Ld->getAlignment(), Ld->getMemOperand()->getFlags());
Chains.push_back(ScalarLoad.getValue(1));
// Create the first element using SCALAR_TO_VECTOR in order to avoid
// another round of DAGCombining.
if (i == 0)
Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
else
Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
ScalarLoad, DAG.getIntPtrConstant(i, dl));
Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
}
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
// Bitcast the loaded value to a vector of the original element type, in
// the size of the target vector type.
SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res);
unsigned SizeRatio = RegSz / MemSz;
if (Ext == ISD::SEXTLOAD) {
// If we have SSE4.1, we can directly emit a VSEXT node.
if (Subtarget.hasSSE41()) {
SDValue Sext = getExtendInVec(X86ISD::VSEXT, dl, RegVT, SlicedVec, DAG);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
return Sext;
}
// Otherwise we'll use SIGN_EXTEND_VECTOR_INREG to sign extend the lowest
// lanes.
assert(TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND_VECTOR_INREG, RegVT) &&
"We can't implement a sext load without SIGN_EXTEND_VECTOR_INREG!");
SDValue Shuff = DAG.getSignExtendVectorInReg(SlicedVec, dl, RegVT);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
return Shuff;
}
if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 &&
MemVT == MVT::v8i8) {
SDValue Sext = getExtendInVec(X86ISD::VZEXT, dl, RegVT, SlicedVec, DAG);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
return Sext;
}
// Redistribute the loaded elements into the different locations.
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i * SizeRatio] = i;
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
DAG.getUNDEF(WideVecVT), ShuffleVec);
// Bitcast to the requested type.
Shuff = DAG.getBitcast(RegVT, Shuff);
DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
return Shuff;
}
/// Return true if the node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
/// each of which has no other use apart from the AND / OR.
static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
Opc = Op.getOpcode();
if (Opc != ISD::OR && Opc != ISD::AND)
return false;
return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
Op.getOperand(0).hasOneUse() &&
Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
Op.getOperand(1).hasOneUse());
}
/// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, and the
/// SETCC node has a single use.
static bool isXor1OfSetCC(SDValue Op) {
if (Op.getOpcode() != ISD::XOR)
return false;
if (isOneConstant(Op.getOperand(1)))
return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
Op.getOperand(0).hasOneUse();
return false;
}
SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
bool addTest = true;
SDValue Chain = Op.getOperand(0);
SDValue Cond = Op.getOperand(1);
SDValue Dest = Op.getOperand(2);
SDLoc dl(Op);
SDValue CC;
bool Inverted = false;
if (Cond.getOpcode() == ISD::SETCC) {
// Check for setcc([su]{add,sub,mul}o == 0).
if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
isNullConstant(Cond.getOperand(1)) &&
Cond.getOperand(0).getResNo() == 1 &&
(Cond.getOperand(0).getOpcode() == ISD::SADDO ||
Cond.getOperand(0).getOpcode() == ISD::UADDO ||
Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
Cond.getOperand(0).getOpcode() == ISD::USUBO ||
Cond.getOperand(0).getOpcode() == ISD::SMULO ||
Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
Inverted = true;
Cond = Cond.getOperand(0);
} else {
if (SDValue NewCond = LowerSETCC(Cond, DAG))
Cond = NewCond;
}
}
#if 0
// FIXME: LowerXALUO doesn't handle these!!
else if (Cond.getOpcode() == X86ISD::ADD ||
Cond.getOpcode() == X86ISD::SUB ||
Cond.getOpcode() == X86ISD::SMUL ||
Cond.getOpcode() == X86ISD::UMUL)
Cond = LowerXALUO(Cond, DAG);
#endif
// Look past (and (setcc_carry (cmp ...)), 1).
if (Cond.getOpcode() == ISD::AND &&
Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
isOneConstant(Cond.getOperand(1)))
Cond = Cond.getOperand(0);
// If the condition flag is set by an X86ISD::CMP, then use it as the condition
// setting operand in place of the X86ISD::SETCC.
unsigned CondOpcode = Cond.getOpcode();
if (CondOpcode == X86ISD::SETCC ||
CondOpcode == X86ISD::SETCC_CARRY) {
CC = Cond.getOperand(0);
SDValue Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
// FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
Cond = Cmp;
addTest = false;
} else {
switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
default: break;
case X86::COND_O:
case X86::COND_B:
// These can only come from an arithmetic instruction with overflow,
// e.g. SADDO, UADDO.
Cond = Cond.getOperand(1);
addTest = false;
break;
}
}
}
CondOpcode = Cond.getOpcode();
if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
Cond.getOperand(0).getValueType() != MVT::i8)) {
SDValue LHS = Cond.getOperand(0);
SDValue RHS = Cond.getOperand(1);
unsigned X86Opcode;
unsigned X86Cond;
SDVTList VTs;
// Keep this in sync with LowerXALUO, otherwise we might create redundant
// instructions that can't be removed afterwards (i.e. X86ISD::ADD and
// X86ISD::INC).
switch (CondOpcode) {
case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
case ISD::SADDO:
if (isOneConstant(RHS)) {
X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
break;
}
X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
case ISD::SSUBO:
if (isOneConstant(RHS)) {
X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
break;
}
X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
default: llvm_unreachable("unexpected overflowing operator");
}
if (Inverted)
X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
if (CondOpcode == ISD::UMULO)
VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
MVT::i32);
else
VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
if (CondOpcode == ISD::UMULO)
Cond = X86Op.getValue(2);
else
Cond = X86Op.getValue(1);
CC = DAG.getConstant(X86Cond, dl, MVT::i8);
addTest = false;
} else {
unsigned CondOpc;
if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
SDValue Cmp = Cond.getOperand(0).getOperand(1);
if (CondOpc == ISD::OR) {
// Also, recognize the pattern generated by an FCMP_UNE. We can emit
// two branches instead of an explicit OR instruction with a
// separate test.
if (Cmp == Cond.getOperand(1).getOperand(1) &&
isX86LogicalCmp(Cmp)) {
CC = Cond.getOperand(0).getOperand(0);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = Cond.getOperand(1).getOperand(0);
Cond = Cmp;
addTest = false;
}
} else { // ISD::AND
// Also, recognize the pattern generated by an FCMP_OEQ. We can emit
// two branches instead of an explicit AND instruction with a
// separate test. However, we only do this if this block doesn't
// have a fall-through edge, because this requires an explicit
// jmp when the condition is false.
if (Cmp == Cond.getOperand(1).getOperand(1) &&
isX86LogicalCmp(Cmp) &&
Op.getNode()->hasOneUse()) {
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, dl, MVT::i8);
SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_OEQ.
if (User->getOpcode() == ISD::BR) {
SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
(void)NewBR;
Dest = FalseBB;
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
}
}
} else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
// Recognize xorb (setcc), 1 patterns. The xor inverts the condition.
// It should be transformed by the DAG combiner except when the condition
// is set by an arithmetic-with-overflow node.
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, dl, MVT::i8);
Cond = Cond.getOperand(0).getOperand(1);
addTest = false;
} else if (Cond.getOpcode() == ISD::SETCC &&
cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
// For FCMP_OEQ, we can emit
// two branches instead of an explicit AND instruction with a
// separate test. However, we only do this if this block doesn't
// have a fall-through edge, because this requires an explicit
// jmp when the condition is false.
if (Op.getNode()->hasOneUse()) {
SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_OEQ.
if (User->getOpcode() == ISD::BR) {
SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
(void)NewBR;
Dest = FalseBB;
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
}
} else if (Cond.getOpcode() == ISD::SETCC &&
cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
// For FCMP_UNE, we can emit
// two branches instead of an explicit AND instruction with a
// separate test. However, we only do this if this block doesn't
// have a fall-through edge, because this requires an explicit
// jmp when the condition is false.
if (Op.getNode()->hasOneUse()) {
SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
// to implement FCMP_UNE.
if (User->getOpcode() == ISD::BR) {
SDValue FalseBB = User->getOperand(1);
SDNode *NewBR =
DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
assert(NewBR == User);
(void)NewBR;
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8);
Cond = Cmp;
addTest = false;
Dest = FalseBB;
}
}
}
}
if (addTest) {
// Look past the truncate if the high bits are known zero.
if (isTruncWithZeroHighBitsInput(Cond, DAG))
Cond = Cond.getOperand(0);
// We know the result of AND is compared against zero. Try to match
// it to BT.
if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
if (SDValue NewSetCC = LowerAndToBT(Cond, ISD::SETNE, dl, DAG)) {
CC = NewSetCC.getOperand(0);
Cond = NewSetCC.getOperand(1);
addTest = false;
}
}
}
if (addTest) {
X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
CC = DAG.getConstant(X86Cond, dl, MVT::i8);
Cond = EmitTest(Cond, X86Cond, dl, DAG);
}
Cond = ConvertCmpIfNecessary(Cond, DAG);
return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cond);
}
// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
// Calls to _alloca are needed to probe the stack when allocating more than 4K
// bytes in one go. Touching the stack at 4K increments is necessary to ensure
// that the guard pages used by the OS virtual memory manager are allocated in
// correct sequence.
SDValue
X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
bool SplitStack = MF.shouldSplitStack();
bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
SplitStack || EmitStackProbe;
SDLoc dl(Op);
// Get the inputs.
SDNode *Node = Op.getNode();
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
EVT VT = Node->getValueType(0);
// Chain the dynamic stack allocation so that it doesn't modify the stack
// pointer when other instructions are using the stack.
Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
bool Is64Bit = Subtarget.is64Bit();
MVT SPTy = getPointerTy(DAG.getDataLayout());
SDValue Result;
if (!Lower) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
" not tell us which reg is the stack pointer!");
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
Chain = SP.getValue(1);
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlign = TFI.getStackAlignment();
Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
if (Align > StackAlign)
Result = DAG.getNode(ISD::AND, dl, VT, Result,
DAG.getConstant(-(uint64_t)Align, dl, VT));
Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
} else if (SplitStack) {
MachineRegisterInfo &MRI = MF.getRegInfo();
if (Is64Bit) {
// The 64-bit implementation of segmented stacks needs to clobber both r10
// and r11. This makes it impossible to use it along with nested parameters.
const Function &F = MF.getFunction();
for (const auto &A : F.args()) {
if (A.hasNestAttr())
report_fatal_error("Cannot use segmented stacks with functions that "
"have nested arguments.");
}
}
const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
DAG.getRegister(Vreg, SPTy));
} else {
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned SPReg = RegInfo->getStackRegister();
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
Chain = SP.getValue(1);
if (Align) {
SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
DAG.getConstant(-(uint64_t)Align, dl, VT));
Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
}
Result = SP;
}
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
SDValue Ops[2] = {Result, Chain};
return DAG.getMergeValues(Ops, dl);
}
SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
auto PtrVT = getPointerTy(MF.getDataLayout());
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
SDLoc DL(Op);
if (!Subtarget.is64Bit() ||
Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
MachinePointerInfo(SV));
}
// __va_list_tag:
// gp_offset (0 - 6 * 8)
// fp_offset (48 - 48 + 8 * 16)
// overflow_arg_area (points to parameters passed in memory).
// reg_save_area
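// In C terms (SysV AMD64 ABI):
//   typedef struct {
//     unsigned gp_offset;       // next GPR slot in reg_save_area (0..48)
//     unsigned fp_offset;       // next XMM slot in reg_save_area (48..176)
//     void *overflow_arg_area;  // next stack-passed argument
//     void *reg_save_area;      // base of the register save area
//   } __va_list_tag;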
SmallVector<SDValue, 8> MemOps;
SDValue FIN = Op.getOperand(1);
// Store gp_offset
SDValue Store = DAG.getStore(
Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
MachinePointerInfo(SV));
MemOps.push_back(Store);
// Store fp_offset
FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
Store = DAG.getStore(
Op.getOperand(0), DL,
DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
MachinePointerInfo(SV, 4));
MemOps.push_back(Store);
// Store ptr to overflow_arg_area
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
Store =
DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
MemOps.push_back(Store);
// Store ptr to reg_save_area.
FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
Store = DAG.getStore(
Op.getOperand(0), DL, RSFIN, FIN,
MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
MemOps.push_back(Store);
return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}
SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget.is64Bit() &&
"LowerVAARG only handles 64-bit va_arg!");
assert(Op.getNumOperands() == 4);
MachineFunction &MF = DAG.getMachineFunction();
if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
// The Win64 ABI uses char* instead of a structure.
return DAG.expandVAArg(Op.getNode());
SDValue Chain = Op.getOperand(0);
SDValue SrcPtr = Op.getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
unsigned Align = Op.getConstantOperandVal(3);
SDLoc dl(Op);
EVT ArgVT = Op.getNode()->getValueType(0);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
uint8_t ArgMode;
// Decide which area this value should be read from.
// TODO: Implement the AMD64 ABI in its entirety. This simple
// selection mechanism works only for the basic types.
if (ArgVT == MVT::f80) {
llvm_unreachable("va_arg for f80 not yet implemented");
} else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
} else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
} else {
llvm_unreachable("Unhandled argument type in LowerVAARG");
}
if (ArgMode == 2) {
// Sanity Check: Make sure using fp_offset makes sense.
assert(!Subtarget.useSoftFloat() &&
!(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
Subtarget.hasSSE1());
}
// Insert a VAARG_64 node into the DAG.
// VAARG_64 returns two values: the variable argument address and the chain.
SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
DAG.getConstant(ArgMode, dl, MVT::i8),
DAG.getConstant(Align, dl, MVT::i32)};
SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
SDValue VAARG = DAG.getMemIntrinsicNode(
X86ISD::VAARG_64, dl,
VTs, InstOps, MVT::i64,
MachinePointerInfo(SV),
/*Align=*/0,
MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
Chain = VAARG.getValue(1);
// Load the next argument and return it
return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
}
static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
// where a va_list is still an i8*.
assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
if (Subtarget.isCallingConvWin64(
DAG.getMachineFunction().getFunction().getCallingConv()))
// Probably a Win64 va_copy.
return DAG.expandVACopy(Op.getNode());
SDValue Chain = Op.getOperand(0);
SDValue DstPtr = Op.getOperand(1);
SDValue SrcPtr = Op.getOperand(2);
const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
SDLoc DL(Op);
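// The SysV va_list struct is 24 bytes (i32 + i32 + i8* + i8*), so va_copy is
// a plain 24-byte memcpy with 8-byte alignment.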
return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
false, false,
MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
/// Handle vector element shifts where the shift amount is a constant.
/// Takes the immediate version of the shift as input.
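/// e.g. getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v4i32, V, 3, DAG)
/// yields (VSHLI V, 3), a PSLLD-by-immediate of each i32 lane (assuming V is
/// not a build vector of constants, which would be folded directly).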
static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, uint64_t ShiftAmt,
SelectionDAG &DAG) {
MVT ElementType = VT.getVectorElementType();
// Bitcast the source vector to the output type; this is mainly necessary for
// vXi8/vXi64 shifts.
if (VT != SrcOp.getSimpleValueType())
SrcOp = DAG.getBitcast(VT, SrcOp);
// Fold this packed shift into its first operand if ShiftAmt is 0.
if (ShiftAmt == 0)
return SrcOp;
// Check for ShiftAmt >= element width
if (ShiftAmt >= ElementType.getSizeInBits()) {
if (Opc == X86ISD::VSRAI)
ShiftAmt = ElementType.getSizeInBits() - 1;
else
return DAG.getConstant(0, dl, VT);
}
assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
&& "Unknown target vector shift-by-constant node");
// Fold this packed vector shift into a build vector if SrcOp is a
// vector of Constants or UNDEFs.
if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
SmallVector<SDValue, 8> Elts;
unsigned NumElts = SrcOp->getNumOperands();
ConstantSDNode *ND;
switch(Opc) {
default: llvm_unreachable("Unknown opcode!");
case X86ISD::VSHLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
}
break;
case X86ISD::VSRLI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
}
break;
case X86ISD::VSRAI:
for (unsigned i=0; i!=NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
continue;
}
ND = cast<ConstantSDNode>(CurrentOp);
const APInt &C = ND->getAPIntValue();
Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
}
break;
}
return DAG.getBuildVector(VT, dl, Elts);
}
return DAG.getNode(Opc, dl, VT, SrcOp,
DAG.getConstant(ShiftAmt, dl, MVT::i8));
}
/// Handle vector element shifts where the shift amount may or may not be a
/// constant. Takes the immediate version of the shift as input.
static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
SDValue SrcOp, SDValue ShAmt,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT SVT = ShAmt.getSimpleValueType();
assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
// Catch shift-by-constant.
if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
CShAmt->getZExtValue(), DAG);
// Change opcode to non-immediate version
switch (Opc) {
default: llvm_unreachable("Unknown target vector shift node");
case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
}
// We need to build a vector containing the shift amount.
// SSE/AVX packed shifts only use the lower 64 bits of the shift count.
// +=================+============+=======================================+
// | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
// +=================+============+=======================================+
// | i64 | Yes, No | Use ShAmt as lowest elt |
// | i32 | Yes | zero-extend in-reg |
// | (i32 zext(i16)) | Yes | zero-extend in-reg |
// | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
// +=================+============+=======================================+
if (SVT == MVT::i64)
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
else if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
ShAmt = ShAmt.getOperand(0);
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v8i16, ShAmt);
ShAmt = DAG.getZeroExtendVectorInReg(ShAmt, SDLoc(ShAmt), MVT::v2i64);
} else if (Subtarget.hasSSE41() &&
ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
ShAmt = DAG.getZeroExtendVectorInReg(ShAmt, SDLoc(ShAmt), MVT::v2i64);
} else {
SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT),
DAG.getUNDEF(SVT), DAG.getUNDEF(SVT)};
ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
}
// The return type has to be a 128-bit type with the same element
// type as the input type.
MVT EltVT = VT.getVectorElementType();
MVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
ShAmt = DAG.getBitcast(ShVT, ShAmt);
return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
/// Return Mask with the necessary casting or extending
/// for \p Mask according to \p MaskVT when lowering masking intrinsics.
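/// e.g. an i8 mask with MaskVT == v4i1 is bitcast to v8i1 and the low v4i1
/// subvector is extracted.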
static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
const X86Subtarget &Subtarget, SelectionDAG &DAG,
const SDLoc &dl) {
if (isAllOnesConstant(Mask))
return DAG.getConstant(1, dl, MaskVT);
if (X86::isZeroNode(Mask))
return DAG.getConstant(0, dl, MaskVT);
if (MaskVT.bitsGT(Mask.getSimpleValueType())) {
// Mask should be extended
Mask = DAG.getNode(ISD::ANY_EXTEND, dl,
MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask);
}
if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
// In 32-bit mode, bitcasting an i64 is illegal; split it into two i32 halves.
SDValue Lo, Hi;
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
DAG.getConstant(0, dl, MVT::i32));
Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
DAG.getConstant(1, dl, MVT::i32));
Lo = DAG.getBitcast(MVT::v32i1, Lo);
Hi = DAG.getBitcast(MVT::v32i1, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
} else {
MVT BitcastVT = MVT::getVectorVT(MVT::i1,
Mask.getSimpleValueType().getSizeInBits());
// When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
// are extracted by EXTRACT_SUBVECTOR.
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
DAG.getBitcast(BitcastVT, Mask),
DAG.getIntPtrConstant(0, dl));
}
}
/// Return (and \p Op, \p Mask) for compare instructions or
/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
/// necessary casting or extending for \p Mask when lowering masking intrinsics
static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
unsigned OpcodeSelect = ISD::VSELECT;
SDLoc dl(Op);
if (isAllOnesConstant(Mask))
return Op;
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
switch (Op.getOpcode()) {
default: break;
case X86ISD::CMPM:
case X86ISD::CMPM_RND:
case X86ISD::VPSHUFBITQMB:
case X86ISD::VFPCLASS:
return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
case ISD::TRUNCATE:
case X86ISD::VTRUNC:
case X86ISD::VTRUNCS:
case X86ISD::VTRUNCUS:
case X86ISD::CVTPS2PH:
// We can't use ISD::VSELECT here because it is not always "Legal"
// for the destination type. For example, vpmovqb requires only AVX512F,
// but a vselect that operates on byte elements requires BWI.
OpcodeSelect = X86ISD::SELECT;
break;
}
if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
}
/// Creates an SDNode for a predicated scalar operation.
/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask comes in as MVT::i8 and should be transformed
/// to MVT::v1i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
/// "X86select" instead of "vselect". We just can't create the "vselect" node
/// for a scalar instruction.
static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
SDValue PreservedSrc,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
if (MaskConst->getZExtValue() & 0x1)
return Op;
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
SDValue IMask = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Mask);
if (Op.getOpcode() == X86ISD::FSETCCM ||
Op.getOpcode() == X86ISD::FSETCCM_RND ||
Op.getOpcode() == X86ISD::VFPCLASSS)
return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
if (PreservedSrc.isUndef())
PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
}
static int getSEHRegistrationNodeSize(const Function *Fn) {
if (!Fn->hasPersonalityFn())
report_fatal_error(
"querying registration node size for function without personality");
// The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
// WinEHStatePass for the full struct definition.
switch (classifyEHPersonality(Fn->getPersonalityFn())) {
case EHPersonality::MSVC_X86SEH: return 24;
case EHPersonality::MSVC_CXX: return 16;
default: break;
}
report_fatal_error(
"can only recover FP for 32-bit MSVC EH personality functions");
}
/// When the MSVC runtime transfers control to us, either to an outlined
/// function or when returning to a parent frame after catching an exception, we
/// recover the parent frame pointer by doing arithmetic on the incoming EBP.
/// Here's the math:
/// RegNodeBase = EntryEBP - RegNodeSize
/// ParentFP = RegNodeBase - ParentFrameOffset
/// Subtracting RegNodeSize takes us to the offset of the registration node, and
/// subtracting the offset (negative on x86) takes us back to the parent FP.
static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
SDValue EntryEBP) {
MachineFunction &MF = DAG.getMachineFunction();
SDLoc dl;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
// It's possible that the parent function no longer has a personality function
// if the exceptional code was optimized away, in which case we just return
// the incoming EBP.
if (!Fn->hasPersonalityFn())
return EntryEBP;
// Get an MCSymbol that will ultimately resolve to the frame offset of the EH
// registration, or the .set_setframe offset.
MCSymbol *OffsetSym =
MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
GlobalValue::dropLLVMManglingEscape(Fn->getName()));
SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
SDValue ParentFrameOffset =
DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
// Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
// prologue to RBP in the parent function.
const X86Subtarget &Subtarget =
static_cast<const X86Subtarget &>(DAG.getSubtarget());
if (Subtarget.is64Bit())
return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
int RegNodeSize = getSEHRegistrationNodeSize(Fn);
// RegNodeBase = EntryEBP - RegNodeSize
// ParentFP = RegNodeBase - ParentFrameOffset
SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
DAG.getConstant(RegNodeSize, dl, PtrVT));
return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
}
SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SelectionDAG &DAG) const {
// Helper to detect if the operand is CUR_DIRECTION rounding mode.
auto isRoundModeCurDirection = [](SDValue Rnd) {
if (!isa<ConstantSDNode>(Rnd))
return false;
unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
return Round == X86::STATIC_ROUNDING::CUR_DIRECTION;
};
SDLoc dl(Op);
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
MVT VT = Op.getSimpleValueType();
const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
if (IntrData) {
switch(IntrData->Type) {
case INTR_TYPE_1OP: {
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(2);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
Op.getOperand(1), Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
}
case INTR_TYPE_2OP:
case INTR_TYPE_2OP_IMM8: {
SDValue Src2 = Op.getOperand(2);
if (IntrData->Type == INTR_TYPE_2OP_IMM8)
Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(3);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
Op.getOperand(1), Src2, Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(1), Src2);
}
case INTR_TYPE_3OP:
case INTR_TYPE_3OP_IMM8: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
if (IntrData->Type == INTR_TYPE_3OP_IMM8)
Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd)) {
return DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Src3, Rnd);
}
}
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Src1, Src2, Src3);
}
case INTR_TYPE_4OP:
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
case INTR_TYPE_1OP_MASK_RM: {
SDValue Src = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
SDValue RoundingMode;
// We always add rounding mode to the Node.
// If the rounding mode is not specified, we add the
// "current direction" mode.
if (Op.getNumOperands() == 4)
RoundingMode =
DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
else
RoundingMode = Op.getOperand(4);
assert(IntrData->Opc1 == 0 && "Unexpected second opcode!");
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
RoundingMode),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_1OP_MASK: {
SDValue Src = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
// We add rounding mode to the Node when
// - RM Opcode is specified and
// - RM is not "current direction".
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_SCALAR_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue passThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
// There are 2 kinds of intrinsics in this group:
// (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
//     (5 when Opc1 provides no rounding-mode variant),
// (2) With rounding mode and sae - 7 operands (6 without a rounding variant).
bool HasRounding = IntrWithRoundingModeOpcode != 0;
if (Op.getNumOperands() == (5U + HasRounding)) {
if (HasRounding) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd))
return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, VT, Src1, Src2, Rnd),
Mask, passThru, Subtarget, DAG);
}
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
Src2),
Mask, passThru, Subtarget, DAG);
}
assert(Op.getNumOperands() == (6U + HasRounding) &&
"Unexpected intrinsic form");
SDValue RoundingMode = Op.getOperand(5);
if (HasRounding) {
SDValue Sae = Op.getOperand(6);
if (!isRoundModeCurDirection(Sae))
return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, VT, Src1, Src2,
RoundingMode, Sae),
Mask, passThru, Subtarget, DAG);
}
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
Src2, RoundingMode),
Mask, passThru, Subtarget, DAG);
}
case INTR_TYPE_SCALAR_MASK_RM: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src0 = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
// There are 2 kinds of intrinsics in this group:
// (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
// (2) With rounding mode and sae - 7 operands.
if (Op.getNumOperands() == 6) {
SDValue Sae = Op.getOperand(5);
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
Sae),
Mask, Src0, Subtarget, DAG);
}
assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
SDValue RoundingMode = Op.getOperand(5);
SDValue Sae = Op.getOperand(6);
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
RoundingMode, Sae),
Mask, Src0, Subtarget, DAG);
}
case INTR_TYPE_2OP_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue PassThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
// TODO: Intrinsics should have fast-math-flags to propagate.
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_2OP_MASK_RM: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue PassThru = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
// We specify 2 possible modes for intrinsics, with/without rounding
// modes.
// First, we check if the intrinsic has a rounding mode (6 operands);
// if not, we set the rounding mode to "current".
SDValue Rnd;
if (Op.getNumOperands() == 6)
Rnd = Op.getOperand(5);
else
Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Rnd),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_3OP_SCALAR_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
SDValue PassThru = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(6);
if (!isRoundModeCurDirection(Rnd))
return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, VT, Src1, Src2, Src3, Rnd),
Mask, PassThru, Subtarget, DAG);
}
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
case INTR_TYPE_3OP_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
SDValue PassThru = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(6);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Src3, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
case VPERM_2OP : {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
// Swap Src1 and Src2 in the node creation
return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
}
case FMA_OP_MASKZ:
case FMA_OP_MASK: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
SDValue Mask = Op.getOperand(4);
MVT VT = Op.getSimpleValueType();
SDValue PassThru = SDValue();
// Set the PassThru element.
if (IntrData->Type == FMA_OP_MASKZ)
PassThru = getZeroVector(VT, Subtarget, DAG, dl);
else
PassThru = Src1;
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd))
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src1, Src2, Src3, Rnd),
Mask, PassThru, Subtarget, DAG);
}
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
dl, Op.getValueType(),
Src1, Src2, Src3),
Mask, PassThru, Subtarget, DAG);
}
case IFMA_OP:
// NOTE: We need to swizzle the operands to pass the multiply operands
// first.
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
case CVTPD2PS:
// ISD::FP_ROUND has a second argument that indicates if the truncation
// does not change the value. Set it to 0 since it can change.
return DAG.getNode(IntrData->Opc0, dl, VT, Op.getOperand(1),
DAG.getIntPtrConstant(0, dl));
case CVTPD2PS_MASK: {
SDValue Src = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
// We add rounding mode to the Node when
// - RM Opcode is specified and
// - RM is not "current direction".
unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
if (IntrWithRoundingModeOpcode != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd)) {
return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
dl, Op.getValueType(),
Src, Rnd),
Mask, PassThru, Subtarget, DAG);
}
}
assert(IntrData->Opc0 == ISD::FP_ROUND && "Unexpected opcode!");
// ISD::FP_ROUND has a second argument that indicates if the truncation
// does not change the value. Set it to 0 since it can change.
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
DAG.getIntPtrConstant(0, dl)),
Mask, PassThru, Subtarget, DAG);
}
case FPCLASS: {
// FPclass intrinsics
SDValue Src1 = Op.getOperand(1);
MVT MaskVT = Op.getSimpleValueType();
SDValue Imm = Op.getOperand(2);
return DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Imm);
}
case FPCLASSS: {
SDValue Src1 = Op.getOperand(1);
SDValue Imm = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
Subtarget, DAG);
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
DAG.getConstant(0, dl, MVT::v8i1),
FPclassMask, DAG.getIntPtrConstant(0, dl));
return DAG.getBitcast(MVT::i8, Ins);
}
case CMP_MASK: {
// Comparison intrinsics with masks.
// Example of transformation:
// (i8 (int_x86_avx512_mask_pcmpeq_q_128
// (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
// (i8 (bitcast
// (v8i1 (insert_subvector zero,
// (v2i1 (and (PCMPEQM %a, %b),
// (extract_subvector
// (v8i1 (bitcast %mask)), 0))), 0))))
MVT VT = Op.getOperand(1).getSimpleValueType();
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
MVT BitcastVT = MVT::getVectorVT(MVT::i1,
Mask.getSimpleValueType().getSizeInBits());
SDValue Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
Op.getOperand(2));
SDValue CmpMask = getVectorMaskingNode(Cmp, Mask, SDValue(),
Subtarget, DAG);
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits in the v2i1/v4i1 case.
SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
DAG.getConstant(0, dl, BitcastVT),
CmpMask, DAG.getIntPtrConstant(0, dl));
return DAG.getBitcast(Op.getValueType(), Res);
}
case CMP_MASK_CC: {
MVT MaskVT = Op.getSimpleValueType();
SDValue Cmp;
SDValue CC = Op.getOperand(3);
CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC);
// We specify 2 possible opcodes for intrinsics with rounding modes.
// First, we check if the intrinsic may have a non-default rounding mode
// (IntrData->Opc1 != 0), then we check the rounding mode operand.
if (IntrData->Opc1 != 0) {
SDValue Rnd = Op.getOperand(4);
if (!isRoundModeCurDirection(Rnd))
Cmp = DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
Op.getOperand(2), CC, Rnd);
}
// Default rounding mode.
if (!Cmp.getNode())
Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
Op.getOperand(2), CC);
return Cmp;
}
case CMP_MASK_SCALAR_CC: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3));
SDValue Mask = Op.getOperand(4);
SDValue Cmp;
if (IntrData->Opc1 != 0) {
SDValue Rnd = Op.getOperand(5);
if (!isRoundModeCurDirection(Rnd))
Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Rnd);
}
// Default rounding mode.
if (!Cmp.getNode())
Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
Subtarget, DAG);
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
DAG.getConstant(0, dl, MVT::v8i1),
CmpMask, DAG.getIntPtrConstant(0, dl));
return DAG.getBitcast(MVT::i8, Ins);
}
case COMI: { // Comparison intrinsics
ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
SDValue SetCC;
switch (CC) {
case ISD::SETEQ: { // (ZF = 0 and PF = 0)
SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
break;
}
case ISD::SETNE: { // (ZF = 1 or PF = 1)
SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
break;
}
case ISD::SETGT: // (CF = 0 and ZF = 0)
SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
break;
case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
break;
}
case ISD::SETGE: // CF = 0
SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
break;
case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
break;
default:
llvm_unreachable("Unexpected illegal condition!");
}
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case COMI_RM: { // Comparison intrinsics with Sae
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
SDValue Sae = Op.getOperand(4);
SDValue FCmp;
if (isRoundModeCurDirection(Sae))
FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
DAG.getConstant(CondVal, dl, MVT::i8));
else
FCmp = DAG.getNode(X86ISD::FSETCCM_RND, dl, MVT::v1i1, LHS, RHS,
DAG.getConstant(CondVal, dl, MVT::i8), Sae);
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
DAG.getConstant(0, dl, MVT::v16i1),
FCmp, DAG.getIntPtrConstant(0, dl));
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
DAG.getBitcast(MVT::i16, Ins));
}
case VSHIFT:
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
Op.getOperand(1), Op.getOperand(2), Subtarget,
DAG);
case COMPRESS_EXPAND_IN_REG: {
SDValue Mask = Op.getOperand(3);
SDValue DataToCompress = Op.getOperand(1);
SDValue PassThru = Op.getOperand(2);
if (isAllOnesConstant(Mask)) // return data as is
return Op.getOperand(1);
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
DataToCompress),
Mask, PassThru, Subtarget, DAG);
}
case FIXUPIMMS:
case FIXUPIMMS_MASKZ:
case FIXUPIMM:
case FIXUPIMM_MASKZ: {
SDValue Src1 = Op.getOperand(1);
SDValue Src2 = Op.getOperand(2);
SDValue Src3 = Op.getOperand(3);
SDValue Imm = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
SDValue Passthru = (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMMS) ?
Src1 : getZeroVector(VT, Subtarget, DAG, dl);
// We specify 2 possible modes for intrinsics, with/without rounding
// modes.
// First, we check if the intrinsic has a rounding mode (7 operands);
// if not, we set the rounding mode to "current".
SDValue Rnd;
if (Op.getNumOperands() == 7)
Rnd = Op.getOperand(6);
else
Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32);
if (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMM_MASKZ)
return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Src3, Imm, Rnd),
Mask, Passthru, Subtarget, DAG);
else // Scalar - FIXUPIMMS, FIXUPIMMS_MASKZ
return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
Src1, Src2, Src3, Imm, Rnd),
Mask, Passthru, Subtarget, DAG);
}
case ROUNDP: {
assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
// Clear the upper bits of the rounding immediate so that the legacy
// intrinsic can't trigger the scaling behavior of VRNDSCALE.
SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
Op.getOperand(2),
DAG.getConstant(0xf, dl, MVT::i32));
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(1), RoundingMode);
}
case ROUNDS: {
assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
// Clear the upper bits of the rounding immediate so that the legacy
// intrinsic can't trigger the scaling behavior of VRNDSCALE.
SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32,
Op.getOperand(3),
DAG.getConstant(0xf, dl, MVT::i32));
return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2), RoundingMode);
}
default:
break;
}
}
switch (IntNo) {
default: return SDValue(); // Don't custom lower most intrinsics.
// ptest and testp intrinsics. The intrinsics these come from are designed to
// return an integer value, not just an instruction, so lower it to the ptest
// or testp pattern and a setcc for the result.
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_sse41_ptestnzc:
case Intrinsic::x86_avx_ptestz_256:
case Intrinsic::x86_avx_ptestc_256:
case Intrinsic::x86_avx_ptestnzc_256:
case Intrinsic::x86_avx_vtestz_ps:
case Intrinsic::x86_avx_vtestc_ps:
case Intrinsic::x86_avx_vtestnzc_ps:
case Intrinsic::x86_avx_vtestz_pd:
case Intrinsic::x86_avx_vtestc_pd:
case Intrinsic::x86_avx_vtestnzc_pd:
case Intrinsic::x86_avx_vtestz_ps_256:
case Intrinsic::x86_avx_vtestc_ps_256:
case Intrinsic::x86_avx_vtestnzc_ps_256:
case Intrinsic::x86_avx_vtestz_pd_256:
case Intrinsic::x86_avx_vtestc_pd_256:
case Intrinsic::x86_avx_vtestnzc_pd_256: {
bool IsTestPacked = false;
X86::CondCode X86CC;
switch (IntNo) {
default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
case Intrinsic::x86_avx_vtestz_ps:
case Intrinsic::x86_avx_vtestz_pd:
case Intrinsic::x86_avx_vtestz_ps_256:
case Intrinsic::x86_avx_vtestz_pd_256:
IsTestPacked = true;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_avx_ptestz_256:
// ZF = 1
X86CC = X86::COND_E;
break;
case Intrinsic::x86_avx_vtestc_ps:
case Intrinsic::x86_avx_vtestc_pd:
case Intrinsic::x86_avx_vtestc_ps_256:
case Intrinsic::x86_avx_vtestc_pd_256:
IsTestPacked = true;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_avx_ptestc_256:
// CF = 1
X86CC = X86::COND_B;
break;
case Intrinsic::x86_avx_vtestnzc_ps:
case Intrinsic::x86_avx_vtestnzc_pd:
case Intrinsic::x86_avx_vtestnzc_ps_256:
case Intrinsic::x86_avx_vtestnzc_pd_256:
IsTestPacked = true;
LLVM_FALLTHROUGH;
case Intrinsic::x86_sse41_ptestnzc:
case Intrinsic::x86_avx_ptestnzc_256:
// ZF and CF = 0
X86CC = X86::COND_A;
break;
}
SDValue LHS = Op.getOperand(1);
SDValue RHS = Op.getOperand(2);
unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case Intrinsic::x86_sse42_pcmpistria128:
case Intrinsic::x86_sse42_pcmpestria128:
case Intrinsic::x86_sse42_pcmpistric128:
case Intrinsic::x86_sse42_pcmpestric128:
case Intrinsic::x86_sse42_pcmpistrio128:
case Intrinsic::x86_sse42_pcmpestrio128:
case Intrinsic::x86_sse42_pcmpistris128:
case Intrinsic::x86_sse42_pcmpestris128:
case Intrinsic::x86_sse42_pcmpistriz128:
case Intrinsic::x86_sse42_pcmpestriz128: {
unsigned Opcode;
X86::CondCode X86CC;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
case Intrinsic::x86_sse42_pcmpistria128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_A;
break;
case Intrinsic::x86_sse42_pcmpestria128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_A;
break;
case Intrinsic::x86_sse42_pcmpistric128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_B;
break;
case Intrinsic::x86_sse42_pcmpestric128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_B;
break;
case Intrinsic::x86_sse42_pcmpistrio128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_O;
break;
case Intrinsic::x86_sse42_pcmpestrio128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_O;
break;
case Intrinsic::x86_sse42_pcmpistris128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_S;
break;
case Intrinsic::x86_sse42_pcmpestris128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_S;
break;
case Intrinsic::x86_sse42_pcmpistriz128:
Opcode = X86ISD::PCMPISTR;
X86CC = X86::COND_E;
break;
case Intrinsic::x86_sse42_pcmpestriz128:
Opcode = X86ISD::PCMPESTR;
X86CC = X86::COND_E;
break;
}
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
}
case Intrinsic::x86_sse42_pcmpistri128:
case Intrinsic::x86_sse42_pcmpestri128: {
unsigned Opcode;
if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
Opcode = X86ISD::PCMPISTR;
else
Opcode = X86ISD::PCMPESTR;
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
return DAG.getNode(Opcode, dl, VTs, NewOps);
}
case Intrinsic::x86_sse42_pcmpistrm128:
case Intrinsic::x86_sse42_pcmpestrm128: {
unsigned Opcode;
if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
Opcode = X86ISD::PCMPISTR;
else
Opcode = X86ISD::PCMPESTR;
SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
}
case Intrinsic::eh_sjlj_lsda: {
MachineFunction &MF = DAG.getMachineFunction();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
auto &Context = MF.getMMI().getContext();
MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
Twine(MF.getFunctionNumber()));
return DAG.getNode(getGlobalWrapperKind(), dl, VT,
DAG.getMCSymbol(S, PtrVT));
}
case Intrinsic::x86_seh_lsda: {
// Compute the symbol for the LSDA. We know it'll get emitted later.
MachineFunction &MF = DAG.getMachineFunction();
SDValue Op1 = Op.getOperand(1);
auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
GlobalValue::dropLLVMManglingEscape(Fn->getName()));
// Generate a simple absolute symbol reference. This intrinsic is only
// supported on 32-bit Windows, which isn't PIC.
SDValue Result = DAG.getMCSymbol(LSDASym, VT);
return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
}
case Intrinsic::x86_seh_recoverfp: {
SDValue FnOp = Op.getOperand(1);
SDValue IncomingFPOp = Op.getOperand(2);
GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
if (!Fn)
report_fatal_error(
"llvm.x86.seh.recoverfp must take a function as the first argument");
return recoverFramePointer(DAG, Fn, IncomingFPOp);
}
case Intrinsic::localaddress: {
// Returns one of the stack, base, or frame pointer registers, depending on
// which is used to reference local variables.
MachineFunction &MF = DAG.getMachineFunction();
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned Reg;
if (RegInfo->hasBasePointer(MF))
Reg = RegInfo->getBaseRegister();
else // This function handles the SP or FP case.
Reg = RegInfo->getPtrSizedFrameRegister(MF);
return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
}
}
static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
EVT MaskVT = Mask.getValueType();
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
// If source is undef or we know it won't be used, use a zero vector
// to break register dependency.
// TODO: use undef instead and let BreakFalseDeps deal with it?
if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, Base, Scale, Index, Disp, Segment, Mask, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
return DAG.getMergeValues(RetOps, dl);
}
static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
MVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
// If source is undef or we know it won't be used, use a zero vector
// to break register dependency.
// TODO: use undef instead and let BreakFalseDeps deal with it?
if (Src.isUndef() || ISD::isBuildVectorAllOnes(VMask.getNode()))
Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
return DAG.getMergeValues(RetOps, dl);
}
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Src, SDValue Mask, SDValue Base,
SDValue Index, SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
MVT MaskVT = MVT::getVectorVT(MVT::i1,
Index.getSimpleValueType().getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
return SDValue(Res, 1);
}
static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
SDValue Mask, SDValue Base, SDValue Index,
SDValue ScaleOp, SDValue Chain,
const X86Subtarget &Subtarget) {
SDLoc dl(Op);
auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
// Scale must be constant.
if (!C)
return SDValue();
SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8);
SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
SDValue Segment = DAG.getRegister(0, MVT::i32);
MVT MaskVT =
MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
return SDValue(Res, 0);
}
/// Handles the lowering of builtin intrinsics that return the value
/// of the extended control register.
static void getExtendedControlRegister(SDNode *N, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
SmallVectorImpl<SDValue> &Results) {
assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LO, HI;
// The ECX register is used to select the index of the XCR register to
// return.
SDValue Chain =
DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX, N->getOperand(2));
SDNode *N1 = DAG.getMachineNode(X86::XGETBV, DL, Tys, Chain);
Chain = SDValue(N1, 0);
// Reads the content of XCR and returns it in registers EDX:EAX.
if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
} else {
LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
LO.getValue(2));
}
Chain = HI.getValue(1);
if (Subtarget.is64Bit()) {
// Merge the two 32-bit values into a 64-bit one.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
DAG.getConstant(32, DL, MVT::i8));
Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
Results.push_back(Chain);
return;
}
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
SDValue Ops[] = { LO, HI };
SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
Results.push_back(Pair);
Results.push_back(Chain);
}
/// Handles the lowering of builtin intrinsics that read performance monitor
/// counters (x86_rdpmc).
static void getReadPerformanceCounter(SDNode *N, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
SmallVectorImpl<SDValue> &Results) {
assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue LO, HI;
// The ECX register is used to select the index of the performance counter
// to read.
SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
N->getOperand(2));
SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
// Reads the content of a 64-bit performance counter and returns it in the
// registers EDX:EAX.
if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
} else {
LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
LO.getValue(2));
}
Chain = HI.getValue(1);
if (Subtarget.is64Bit()) {
// The EAX register is loaded with the low-order 32 bits. The EDX register
// is loaded with the supported high-order bits of the counter.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
DAG.getConstant(32, DL, MVT::i8));
Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
Results.push_back(Chain);
return;
}
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
SDValue Ops[] = { LO, HI };
SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
Results.push_back(Pair);
Results.push_back(Chain);
}
/// Handles the lowering of builtin intrinsics that read the time stamp counter
/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
/// READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
SmallVectorImpl<SDValue> &Results) {
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
SDValue LO, HI;
// The processor's time-stamp counter (a 64-bit MSR) is stored into the
// EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
// and the EAX register is loaded with the low-order 32 bits.
if (Subtarget.is64Bit()) {
LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
LO.getValue(2));
} else {
LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
LO.getValue(2));
}
SDValue Chain = HI.getValue(1);
if (Opcode == X86ISD::RDTSCP_DAG) {
assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
// Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
// the ECX register. Add 'ecx' explicitly to the chain.
SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
HI.getValue(2));
// Explicitly store the content of ECX at the location passed in input
// to the 'rdtscp' intrinsic.
Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
MachinePointerInfo());
}
if (Subtarget.is64Bit()) {
// The EDX register is loaded with the high-order 32 bits of the MSR, and
// the EAX register is loaded with the low-order 32 bits.
SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
DAG.getConstant(32, DL, MVT::i8));
Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
Results.push_back(Chain);
return;
}
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
SDValue Ops[] = { LO, HI };
SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
Results.push_back(Pair);
Results.push_back(Chain);
}
static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SmallVector<SDValue, 2> Results;
SDLoc DL(Op);
getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
Results);
return DAG.getMergeValues(Results, DL);
}
static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue RegNode = Op.getOperand(2);
WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
if (!EHInfo)
report_fatal_error("EH registrations only live in functions using WinEH");
// Cast the operand to an alloca, and remember the frame index.
auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
if (!FINode)
report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
// Return the chain operand without making any DAG nodes.
return Chain;
}
static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
SDValue Chain = Op.getOperand(0);
SDValue EHGuard = Op.getOperand(2);
WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
if (!EHInfo)
report_fatal_error("EHGuard only live in functions using WinEH");
// Cast the operand to an alloca, and remember the frame index.
auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
if (!FINode)
report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
EHInfo->EHGuardFrameIndex = FINode->getIndex();
// Return the chain operand without making any DAG nodes.
return Chain;
}
/// Emit Truncating Store with signed or unsigned saturation.
static SDValue
EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
SelectionDAG &DAG) {
SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
return SignedSat ?
DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
/// Emit Masked Truncating Store with signed or unsigned saturation.
static SDValue
EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
MachineMemOperand *MMO, SelectionDAG &DAG) {
SDVTList VTs = DAG.getVTList(MVT::Other);
SDValue Ops[] = { Chain, Ptr, Mask, Val };
return SignedSat ?
DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
}
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
if (!IntrData) {
switch (IntNo) {
case llvm::Intrinsic::x86_seh_ehregnode:
return MarkEHRegistrationNode(Op, DAG);
case llvm::Intrinsic::x86_seh_ehguard:
return MarkEHGuard(Op, DAG);
case llvm::Intrinsic::x86_flags_read_u32:
case llvm::Intrinsic::x86_flags_read_u64:
case llvm::Intrinsic::x86_flags_write_u32:
case llvm::Intrinsic::x86_flags_write_u64: {
// We need a frame pointer because this will get lowered to a PUSH/POP
// sequence.
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
MFI.setHasCopyImplyingStackAdjustment(true);
// Don't do anything here, we will expand these intrinsics out later
// during ExpandISelPseudos in EmitInstrWithCustomInserter.
return SDValue();
}
case Intrinsic::x86_lwpins32:
case Intrinsic::x86_lwpins64:
case Intrinsic::x86_umwait:
case Intrinsic::x86_tpause: {
SDLoc dl(Op);
SDValue Chain = Op->getOperand(0);
SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
unsigned Opcode;
switch (IntNo) {
default: llvm_unreachable("Impossible intrinsic");
case Intrinsic::x86_umwait:
Opcode = X86ISD::UMWAIT;
break;
case Intrinsic::x86_tpause:
Opcode = X86ISD::TPAUSE;
break;
case Intrinsic::x86_lwpins32:
case Intrinsic::x86_lwpins64:
Opcode = X86ISD::LWPINS;
break;
}
SDValue Operation =
DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
Op->getOperand(3), Op->getOperand(4));
SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, SetCC);
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
Operation.getValue(1));
}
}
return SDValue();
}
SDLoc dl(Op);
switch(IntrData->Type) {
default: llvm_unreachable("Unknown Intrinsic Type");
case RDSEED:
case RDRAND: {
// Emit the node with the right value type.
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
// If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
// Otherwise return the value from Rand, which is always 0, casted to i32.
SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
DAG.getConstant(1, dl, Op->getValueType(1)),
DAG.getConstant(X86::COND_B, dl, MVT::i8),
SDValue(Result.getNode(), 1) };
SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
// Return { result, isValid, chain }.
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
SDValue(Result.getNode(), 2));
}
case GATHER_AVX2: {
SDValue Chain = Op.getOperand(0);
SDValue Src = Op.getOperand(2);
SDValue Base = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
Scale, Chain, Subtarget);
}
case GATHER: {
// gather(v1, mask, index, base, scale);
SDValue Chain = Op.getOperand(0);
SDValue Src = Op.getOperand(2);
SDValue Base = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
SDValue Mask = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
Chain, Subtarget);
}
case SCATTER: {
// scatter(base, mask, index, v1, scale);
SDValue Chain = Op.getOperand(0);
SDValue Base = Op.getOperand(2);
SDValue Mask = Op.getOperand(3);
SDValue Index = Op.getOperand(4);
SDValue Src = Op.getOperand(5);
SDValue Scale = Op.getOperand(6);
return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
Scale, Chain, Subtarget);
}
case PREFETCH: {
SDValue Hint = Op.getOperand(6);
unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue();
assert((HintVal == 2 || HintVal == 3) &&
"Wrong prefetch hint in intrinsic: should be 2 or 3");
unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
SDValue Chain = Op.getOperand(0);
SDValue Mask = Op.getOperand(2);
SDValue Index = Op.getOperand(3);
SDValue Base = Op.getOperand(4);
SDValue Scale = Op.getOperand(5);
return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
Subtarget);
}
// Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
case RDTSC: {
SmallVector<SDValue, 2> Results;
getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
Results);
return DAG.getMergeValues(Results, dl);
}
// Read Performance Monitoring Counters.
case RDPMC: {
SmallVector<SDValue, 2> Results;
getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
return DAG.getMergeValues(Results, dl);
}
// Get Extended Control Register.
case XGETBV: {
SmallVector<SDValue, 2> Results;
getExtendedControlRegister(Op.getNode(), dl, DAG, Subtarget, Results);
return DAG.getMergeValues(Results, dl);
}
// XTEST intrinsics.
case XTEST: {
SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
Ret, SDValue(InTrans.getNode(), 1));
}
// ADC/ADCX/SBB
case ADX: {
SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
SDVTList VTs = DAG.getVTList(Op.getOperand(3).getValueType(), MVT::i32);
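// Rematerialize CF from the incoming i8 carry value: adding -1 (0xFF)
// produces a carry-out iff that value is nonzero, so the X86ISD::ADD below
// sets CF exactly when the caller passed a nonzero carry-in.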
SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
DAG.getConstant(-1, dl, MVT::i8));
SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
Op.getOperand(4), GenCF.getValue(1));
SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
Op.getOperand(5), MachinePointerInfo());
SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
SDValue Results[] = { SetCC, Store };
return DAG.getMergeValues(Results, dl);
}
case TRUNCATE_TO_MEM_VI8:
case TRUNCATE_TO_MEM_VI16:
case TRUNCATE_TO_MEM_VI32: {
SDValue Mask = Op.getOperand(4);
SDValue DataToTruncate = Op.getOperand(3);
SDValue Addr = Op.getOperand(2);
SDValue Chain = Op.getOperand(0);
MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
assert(MemIntr && "Expected MemIntrinsicSDNode!");
EVT MemVT = MemIntr->getMemoryVT();
uint16_t TruncationOp = IntrData->Opc0;
switch (TruncationOp) {
case X86ISD::VTRUNC: {
if (isAllOnesConstant(Mask)) // return just a truncate store
return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
MemIntr->getMemOperand());
MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
MemIntr->getMemOperand(), true /* truncating */);
}
case X86ISD::VTRUNCUS:
case X86ISD::VTRUNCS: {
bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
if (isAllOnesConstant(Mask))
return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
MemIntr->getMemOperand(), DAG);
MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
VMask, MemVT, MemIntr->getMemOperand(), DAG);
}
default:
llvm_unreachable("Unsupported truncstore intrinsic");
}
}
}
}
SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
MFI.setReturnAddressIsTaken(true);
if (verifyReturnAddressArgumentIsConstant(Op, DAG))
return SDValue();
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
SDLoc dl(Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
MachinePointerInfo());
}
// Just load the return address.
SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
MachinePointerInfo());
}
SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
SelectionDAG &DAG) const {
DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
return getReturnAddressFrameIndex(DAG);
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
EVT VT = Op.getValueType();
MFI.setFrameAddressIsTaken(true);
if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
// Depth > 0 makes no sense on targets which use Windows unwind codes. It
// is not possible to crawl up the stack without looking at the unwind codes
// simultaneously.
int FrameAddrIndex = FuncInfo->getFAIndex();
if (!FrameAddrIndex) {
// Set up a frame object for the return address.
unsigned SlotSize = RegInfo->getSlotSize();
FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
SlotSize, /*Offset=*/0, /*IsImmutable=*/false);
FuncInfo->setFAIndex(FrameAddrIndex);
}
return DAG.getFrameIndex(FrameAddrIndex, VT);
}
unsigned FrameReg =
RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
SDLoc dl(Op); // FIXME probably not meaningful
unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
(FrameReg == X86::EBP && VT == MVT::i32)) &&
"Invalid Frame Register!");
SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
while (Depth--)
FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
MachinePointerInfo());
return FrameAddr;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
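// This hook resolves the register names used by the llvm.read_register and
// llvm.write_register intrinsics and by named register global variables.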
unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
SelectionDAG &DAG) const {
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
const MachineFunction &MF = DAG.getMachineFunction();
unsigned Reg = StringSwitch<unsigned>(RegName)
.Case("esp", X86::ESP)
.Case("rsp", X86::RSP)
.Case("ebp", X86::EBP)
.Case("rbp", X86::RBP)
.Default(0);
if (Reg == X86::EBP || Reg == X86::RBP) {
if (!TFI.hasFP(MF))
report_fatal_error("register " + StringRef(RegName) +
" is allocatable: function has no frame pointer");
#ifndef NDEBUG
else {
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg =
RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
"Invalid Frame Register!");
}
#endif
}
if (Reg)
return Reg;
report_fatal_error("Invalid register name global variable");
}
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
SelectionDAG &DAG) const {
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}
unsigned X86TargetLowering::getExceptionPointerRegister(
const Constant *PersonalityFn) const {
if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}
unsigned X86TargetLowering::getExceptionSelectorRegister(
const Constant *PersonalityFn) const {
// Funclet personalities don't use selectors (the runtime does the selection).
assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
}
bool X86TargetLowering::needsFixedCatchObjects() const {
return Subtarget.isTargetWin64();
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
SDValue Chain = Op.getOperand(0);
SDValue Offset = Op.getOperand(1);
SDValue Handler = Op.getOperand(2);
SDLoc dl (Op);
EVT PtrVT = getPointerTy(DAG.getDataLayout());
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
(FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
"Invalid Frame Register!");
SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
DAG.getIntPtrConstant(RegInfo->getSlotSize(),
dl));
StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
DAG.getRegister(StoreAddrReg, PtrVT));
}
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
// If the subtarget is not 64-bit, we may need the global base reg
// after isel expands the pseudo, i.e., after the CGBR pass has run.
// Therefore, ask for the GlobalBaseReg now, so that the pass
// inserts the code for us in case we need it.
// Otherwise, we would end up referencing a virtual register
// that is never defined!
if (!Subtarget.is64Bit()) {
const X86InstrInfo *TII = Subtarget.getInstrInfo();
(void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
}
return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
DAG.getVTList(MVT::i32, MVT::Other),
Op.getOperand(0), Op.getOperand(1));
}
SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
Op.getOperand(0), Op.getOperand(1));
}
SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
Op.getOperand(0));
}
static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
return Op.getOperand(0);
}
SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
SelectionDAG &DAG) const {
SDValue Root = Op.getOperand(0);
SDValue Trmp = Op.getOperand(1); // trampoline
SDValue FPtr = Op.getOperand(2); // nested function
SDValue Nest = Op.getOperand(3); // 'nest' parameter value
SDLoc dl (Op);
const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
if (Subtarget.is64Bit()) {
SDValue OutChains[6];
// Large code-model.
const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
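// The stores below assemble this 23-byte sequence (byte offsets on the left):
//    0: 49 BB <imm64>   movabsq $fptr, %r11
//   10: 49 BA <imm64>   movabsq $nest, %r10
//   20: 49 FF E3        jmpq *%r11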
// Load the pointer to the nested function into R11.
unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
SDValue Addr = Trmp;
OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(2, dl, MVT::i64));
OutChains[1] =
DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
/* Alignment = */ 2);
// Load the 'nest' parameter value into R10.
// R10 is specified in X86CallingConv.td
OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(10, dl, MVT::i64));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr, 10));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(12, dl, MVT::i64));
OutChains[3] =
DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
/* Alignment = */ 2);
// Jump to the nested function.
OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(20, dl, MVT::i64));
OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
Addr, MachinePointerInfo(TrmpAddr, 20));
unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
DAG.getConstant(22, dl, MVT::i64));
OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
Addr, MachinePointerInfo(TrmpAddr, 22));
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
} else {
const Function *Func =
cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
CallingConv::ID CC = Func->getCallingConv();
unsigned NestReg;
switch (CC) {
default:
llvm_unreachable("Unsupported calling convention");
case CallingConv::C:
case CallingConv::X86_StdCall: {
// Pass 'nest' parameter in ECX.
// Must be kept in sync with X86CallingConv.td
NestReg = X86::ECX;
// Check that ECX wasn't needed by an 'inreg' parameter.
FunctionType *FTy = Func->getFunctionType();
const AttributeList &Attrs = Func->getAttributes();
if (!Attrs.isEmpty() && !Func->isVarArg()) {
unsigned InRegCount = 0;
unsigned Idx = 1;
for (FunctionType::param_iterator I = FTy->param_begin(),
E = FTy->param_end(); I != E; ++I, ++Idx)
if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
auto &DL = DAG.getDataLayout();
// FIXME: should only count parameters that are lowered to integers.
InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
}
if (InRegCount > 2) {
report_fatal_error("Nest register in use - reduce number of inreg"
" parameters!");
}
}
break;
}
case CallingConv::X86_FastCall:
case CallingConv::X86_ThisCall:
case CallingConv::Fast:
// Pass 'nest' parameter in EAX.
// Must be kept in sync with X86CallingConv.td
NestReg = X86::EAX;
break;
}
SDValue OutChains[4];
SDValue Addr, Disp;
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(10, dl, MVT::i32));
Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
// This is storing the opcode for MOV32ri.
const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
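// The stores below assemble this 10-byte sequence (byte offsets on the left):
//   0: B8+reg <imm32>   movl $nest, %eax or %ecx
//   5: E9 <rel32>       jmp fptr  (Disp is relative to the end, offset 10)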
OutChains[0] =
DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
Trmp, MachinePointerInfo(TrmpAddr));
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(1, dl, MVT::i32));
OutChains[1] =
DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
/* Alignment = */ 1);
const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(5, dl, MVT::i32));
OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
Addr, MachinePointerInfo(TrmpAddr, 5),
/* Alignment = */ 1);
Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
DAG.getConstant(6, dl, MVT::i32));
OutChains[3] =
DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
/* Alignment = */ 1);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
}
}
SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
SelectionDAG &DAG) const {
/*
The rounding mode is in bits 11:10 of FPSR, and has the following
settings:
00 Round to nearest
01 Round to -inf
10 Round to +inf
11 Round to 0
FLT_ROUNDS, on the other hand, expects the following:
-1 Undefined
0 Round to 0
1 Round to nearest
2 Round to +inf
3 Round to -inf
To perform the conversion, we do:
(((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
*/
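// Sanity check of the mapping (FPSR bits 11:10 -> FLT_ROUNDS):
//   00 -> ((0|0)+1)&3 = 1 (nearest)   01 -> ((0|2)+1)&3 = 3 (-inf)
//   10 -> ((1|0)+1)&3 = 2 (+inf)      11 -> ((1|2)+1)&3 = 0 (to zero)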
MachineFunction &MF = DAG.getMachineFunction();
const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
unsigned StackAlignment = TFI.getStackAlignment();
MVT VT = Op.getSimpleValueType();
SDLoc DL(Op);
// Save FP Control Word to stack slot
int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
SDValue StackSlot =
DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
MachineMemOperand::MOStore, 2, 2);
SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
DAG.getVTList(MVT::Other),
Ops, MVT::i16, MMO);
// Load FP Control Word from stack slot
SDValue CWD =
DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
// Transform as necessary
SDValue CWD1 =
DAG.getNode(ISD::SRL, DL, MVT::i16,
DAG.getNode(ISD::AND, DL, MVT::i16,
CWD, DAG.getConstant(0x800, DL, MVT::i16)),
DAG.getConstant(11, DL, MVT::i8));
SDValue CWD2 =
DAG.getNode(ISD::SRL, DL, MVT::i16,
DAG.getNode(ISD::AND, DL, MVT::i16,
CWD, DAG.getConstant(0x400, DL, MVT::i16)),
DAG.getConstant(9, DL, MVT::i8));
SDValue RetVal =
DAG.getNode(ISD::AND, DL, MVT::i16,
DAG.getNode(ISD::ADD, DL, MVT::i16,
DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
DAG.getConstant(1, DL, MVT::i16)),
DAG.getConstant(3, DL, MVT::i16));
return DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
// Split a unary integer op into 2 half-sized ops.
static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
unsigned NumElems = VT.getVectorNumElements();
unsigned SizeInBits = VT.getSizeInBits();
MVT EltVT = VT.getVectorElementType();
SDValue Src = Op.getOperand(0);
assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
"Src and Op should have the same element type!");
// Extract the Lo/Hi vectors
SDLoc dl(Op);
SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
}
// Decompose 256-bit ops into smaller 128-bit ops.
static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return LowerVectorIntUnary(Op, DAG);
}
// Decompose 512-bit ops into smaller 256-bit ops.
static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
assert(Op.getSimpleValueType().is512BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 512-bit vector integer operation");
return LowerVectorIntUnary(Op, DAG);
}
/// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
//
// i8/i16 vectors are implemented using the dword LZCNT vector instruction
// ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
// split the vector, perform the operation on its Lo and Hi parts and
// concatenate the results.
static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(Op.getOpcode() == ISD::CTLZ);
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned NumElems = VT.getVectorNumElements();
assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
"Unsupported element type");
// Split the vector; its Lo and Hi parts will be handled in the next iteration.
if (NumElems > 16 ||
(NumElems == 16 && !Subtarget.canExtendTo512DQ()))
return LowerVectorIntUnary(Op, DAG);
MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
"Unsupported value type for operation");
// Use the natively supported vector instruction vplzcntd.
Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
}
// Lower CTLZ using a PSHUFB lookup table implementation.
static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
int NumElts = VT.getVectorNumElements();
int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
// Per-nibble leading zero PSHUFB lookup table.
const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
/* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
/* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
/* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
SmallVector<SDValue, 64> LUTVec;
for (int i = 0; i < NumBytes; ++i)
LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
// Begin by bitcasting the input to a byte vector, then split those bytes
// into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
// If the hi input nibble is zero then we add both results together, otherwise
// we just take the hi result (by masking the lo result to zero before the
// add).
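// Worked byte example using the LUT above: for 0x1A the hi nibble is 0x1,
// so HiZ is false, the lo result is masked to zero and we keep
// LUT[0x1] == 3 == ctlz8(0x1A). For 0x05 the hi nibble is zero, so we add
// LUT[0x0] + LUT[0x5] == 4 + 1 == 5 == ctlz8(0x05).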
SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
SDValue Zero = getZeroVector(CurrVT, Subtarget, DAG, DL);
SDValue NibbleMask = DAG.getConstant(0xF, DL, CurrVT);
SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
SDValue Lo = DAG.getNode(ISD::AND, DL, CurrVT, Op0, NibbleMask);
SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
SDValue HiZ;
if (CurrVT.is512BitVector()) {
MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
} else {
HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
}
Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
// Merge result back from vXi8 back to VT, working on the lo/hi halves
// of the current vector width in the same way we did for the nibbles.
// If the upper half of the input element is zero then add the halves'
// leading zero counts together, otherwise just use the upper half's.
// Double the width of the result until we are at target width.
while (CurrVT != VT) {
int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
int CurrNumElts = CurrVT.getVectorNumElements();
MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
// Check if the upper half of the input element is zero.
if (CurrVT.is512BitVector()) {
MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
} else {
HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
}
HiZ = DAG.getBitcast(NextVT, HiZ);
// Move the upper/lower halves to the lower bits as we'll be extending to
// NextVT. Mask the lower result to zero if HiZ is true and add the results
// together.
SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
CurrVT = NextVT;
}
return Res;
}
static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (Subtarget.hasCDI() &&
// vXi8 vectors need to be promoted to 512-bits for vXi32.
(Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntUnary(Op, DAG);
// Decompose 512-bit ops into smaller 256-bit ops.
if (VT.is512BitVector() && !Subtarget.hasBWI())
return Lower512IntUnary(Op, DAG);
assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}
static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT OpVT = VT;
unsigned NumBits = VT.getSizeInBits();
SDLoc dl(Op);
unsigned Opc = Op.getOpcode();
if (VT.isVector())
return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
Op = Op.getOperand(0);
if (VT == MVT::i8) {
// Zero extend to i32 since there is no i8 bsr.
OpVT = MVT::i32;
Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
}
// Issue a bsr (scan bits in reverse) which also sets EFLAGS.
SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
if (Opc == ISD::CTLZ) {
// If src is zero (i.e. bsr sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
}
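// bsr yields the index of the highest set bit, so ctlz == (NumBits - 1) -
// index, and for indices in [0, NumBits) that subtraction equals the XOR
// below: e.g. for i32 0x00010000, bsr gives 16 and 16 ^ 31 == 15. The
// 2*NumBits-1 CMOV constant above likewise becomes NumBits (63 ^ 31 == 32).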
// Finally xor with NumBits-1.
Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
DAG.getConstant(NumBits - 1, dl, OpVT));
if (VT == MVT::i8)
Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
return Op;
}
static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
unsigned NumBits = VT.getScalarSizeInBits();
SDLoc dl(Op);
if (VT.isVector()) {
SDValue N0 = Op.getOperand(0);
SDValue Zero = DAG.getConstant(0, dl, VT);
// lsb(x) = (x & -x)
SDValue LSB = DAG.getNode(ISD::AND, dl, VT, N0,
DAG.getNode(ISD::SUB, dl, VT, Zero, N0));
// cttz_undef(x) = (width - 1) - ctlz(lsb)
if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
SDValue WidthMinusOne = DAG.getConstant(NumBits - 1, dl, VT);
return DAG.getNode(ISD::SUB, dl, VT, WidthMinusOne,
DAG.getNode(ISD::CTLZ, dl, VT, LSB));
}
// cttz(x) = ctpop(lsb - 1)
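// E.g. x = 12 (0b1100): lsb = 12 & -12 = 4, ctpop(4 - 1) = ctpop(0b011) = 2,
// and for the undef case above: (32 - 1) - ctlz32(4) = 31 - 29 = 2.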
SDValue One = DAG.getConstant(1, dl, VT);
return DAG.getNode(ISD::CTPOP, dl, VT,
DAG.getNode(ISD::SUB, dl, VT, LSB, One));
}
assert(Op.getOpcode() == ISD::CTTZ &&
"Only scalar CTTZ requires custom lowering");
// Issue a bsf (scan bits forward) which also sets EFLAGS.
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op.getOperand(0));
// If src is zero (i.e. bsf sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
DAG.getConstant(NumBits, dl, VT),
DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
/// Break a 256-bit integer operation into two new 128-bit ones and then
/// concatenate the result back.
static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is256BitVector() && VT.isInteger() &&
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
/// Break a 512-bit integer operation into two new 256-bit ones and then
/// concatenate the result back.
static SDValue Lower512IntArith(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is512BitVector() && VT.isInteger() &&
"Unsupported value type for operation");
unsigned NumElems = VT.getVectorNumElements();
SDLoc dl(Op);
// Extract the LHS vectors
SDValue LHS = Op.getOperand(0);
SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
// Extract the RHS vectors
SDValue RHS = Op.getOperand(1);
SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
MVT EltVT = VT.getVectorElementType();
MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
static SDValue LowerADD_SUB(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT.getScalarType() == MVT::i1)
return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
Op.getOperand(0), Op.getOperand(1));
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntArith(Op, DAG);
}
static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
// Since X86 does not have CMOV for 8-bit integer, we don't convert
// 8-bit integer abs to NEG and CMOV.
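// abs(x) == (0 - x >= 0 via signed flags) ? (0 - x) : x. X86ISD::SUB
// produces both the negation and EFLAGS in one node, so a single CMOV
// keyed on COND_GE picks the non-negative of x and 0 - x.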
SDLoc DL(Op);
SDValue N0 = Op.getOperand(0);
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
DAG.getConstant(0, DL, VT), N0);
SDValue Ops[] = {N0, Neg, DAG.getConstant(X86::COND_GE, DL, MVT::i8),
SDValue(Neg.getNode(), 1)};
return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
}
assert(Op.getSimpleValueType().is256BitVector() &&
Op.getSimpleValueType().isInteger() &&
"Only handle AVX 256-bit vector integer operation");
return Lower256IntUnary(Op, DAG);
}
static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
// For AVX1 cases, split to use legal ops (everything but v4i64).
if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
return Lower256IntArith(Op, DAG);
SDLoc DL(Op);
unsigned Opcode = Op.getOpcode();
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
// For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
// using the SMIN/SMAX instructions and flipping the signbit back.
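// E.g. umin(0xFFFF, 0x0001): flipping gives smin(0x7FFF, 0x8001) == 0x8001
// (-32767 as i16), and flipping back yields 0x0001, the unsigned minimum.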
if (VT == MVT::v8i16) {
assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
"Unexpected MIN/MAX opcode");
SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
}
// Else, expand to a compare/select.
ISD::CondCode CC;
switch (Opcode) {
case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
default: llvm_unreachable("Unknown MINMAX opcode");
}
SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
return DAG.getSelect(DL, VT, Cond, N0, N1);
}
static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
if (VT.getScalarType() == MVT::i1)
return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntArith(Op, DAG);
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
// Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
// vector pairs, multiply and truncate.
if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
if (Subtarget.hasInt256()) {
// For 512-bit vectors, split into 256-bit vectors to allow the
// sign-extension to occur.
if (VT == MVT::v64i8)
return Lower512IntArith(Op, DAG);
// For 256-bit vectors, split into 128-bit vectors to allow the
// sign-extension to occur. We don't need this on AVX512BW as we can
// safely sign-extend to v32i16.
if (VT == MVT::v32i8 && !Subtarget.hasBWI())
return Lower256IntArith(Op, DAG);
MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
return DAG.getNode(
ISD::TRUNCATE, dl, VT,
DAG.getNode(ISD::MUL, dl, ExVT,
DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, A),
DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, B)));
}
assert(VT == MVT::v16i8 &&
"Pre-AVX2 support only supports v16i8 multiplication");
MVT ExVT = MVT::v8i16;
// Extract the lo parts and sign extend to i16
// We're going to mask off the low byte of each result element of the
// pmullw, so it doesn't matter what's in the high byte of each 16-bit
// element.
const int LoShufMask[] = {0, -1, 1, -1, 2, -1, 3, -1,
4, -1, 5, -1, 6, -1, 7, -1};
SDValue ALo = DAG.getVectorShuffle(VT, dl, A, A, LoShufMask);
SDValue BLo = DAG.getVectorShuffle(VT, dl, B, B, LoShufMask);
ALo = DAG.getBitcast(ExVT, ALo);
BLo = DAG.getBitcast(ExVT, BLo);
// Extract the hi parts and sign extend to i16
// We're going to mask off the low byte of each result element of the
// pmullw, so it doesn't matter what's in the high byte of each 16-bit
// element.
const int HiShufMask[] = {8, -1, 9, -1, 10, -1, 11, -1,
12, -1, 13, -1, 14, -1, 15, -1};
SDValue AHi = DAG.getVectorShuffle(VT, dl, A, A, HiShufMask);
SDValue BHi = DAG.getVectorShuffle(VT, dl, B, B, HiShufMask);
AHi = DAG.getBitcast(ExVT, AHi);
BHi = DAG.getBitcast(ExVT, BHi);
// Multiply, mask the lower 8 bits of the lo/hi results and pack
SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
// Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
if (VT == MVT::v4i32) {
assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
"Should not custom lower when pmulld is available!");
// Extract the odd parts.
static const int UnpackMask[] = { 1, -1, 3, -1 };
SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
// Multiply the even parts.
SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, A),
DAG.getBitcast(MVT::v2i64, B));
// Now multiply odd parts.
SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, Aodds),
DAG.getBitcast(MVT::v2i64, Bodds));
Evens = DAG.getBitcast(VT, Evens);
Odds = DAG.getBitcast(VT, Odds);
// Merge the two vectors back together with a shuffle. This expands into 2
// shuffles.
static const int ShufMask[] = { 0, 4, 2, 6 };
return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
}
assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
"Only know how to lower V2I64/V4I64/V8I64 multiply");
assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
// Ahi = psrlqi(a, 32);
// Bhi = psrlqi(b, 32);
//
// AloBlo = pmuludq(a, b);
// AloBhi = pmuludq(a, Bhi);
// AhiBlo = pmuludq(Ahi, b);
//
// Hi = psllqi(AloBhi + AhiBlo, 32);
// return AloBlo + Hi;
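// This follows from splitting each i64 into 32-bit halves:
//   a * b = (Alo + 2^32*Ahi) * (Blo + 2^32*Bhi)
//         = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo)   (mod 2^64),
// since the Ahi*Bhi term is shifted entirely out of the low 64 bits.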
KnownBits AKnown, BKnown;
DAG.computeKnownBits(A, AKnown);
DAG.computeKnownBits(B, BKnown);
APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
SDValue Zero = getZeroVector(VT, Subtarget, DAG, dl);
// Only multiply lo/hi halves that aren't known to be zero.
SDValue AloBlo = Zero;
if (!ALoIsZero && !BLoIsZero)
AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
SDValue AloBhi = Zero;
if (!ALoIsZero && !BHiIsZero) {
SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
}
SDValue AhiBlo = Zero;
if (!AHiIsZero && !BLoIsZero) {
SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
}
SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
}
static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntArith(Op, DAG);
// Only i8 vectors should need custom lowering after this.
assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
(VT == MVT::v64i8 && Subtarget.hasBWI())) &&
"Unsupported vector type");
// Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
// logical shift down the upper half and pack back to i8.
SDValue A = Op.getOperand(0);
SDValue B = Op.getOperand(1);
// With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
// and then ashr/lshr the upper bits down to the lower bits before multiply.
unsigned Opcode = Op.getOpcode();
unsigned ExShift = (ISD::MULHU == Opcode ? ISD::SRL : ISD::SRA);
unsigned ExAVX = (ISD::MULHU == Opcode ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND);
// For 512-bit vectors, split into 256-bit vectors to allow the
// sign-extension to occur.
if (VT == MVT::v64i8)
return Lower512IntArith(Op, DAG);
// AVX2 implementations - extend xmm subvectors to ymm.
if (Subtarget.hasInt256()) {
unsigned NumElems = VT.getVectorNumElements();
SDValue Lo = DAG.getIntPtrConstant(0, dl);
SDValue Hi = DAG.getIntPtrConstant(NumElems / 2, dl);
if (VT == MVT::v32i8) {
if (Subtarget.canExtendTo512BW()) {
SDValue ExA = DAG.getNode(ExAVX, dl, MVT::v32i16, A);
SDValue ExB = DAG.getNode(ExAVX, dl, MVT::v32i16, B);
SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v32i16, ExA, ExB);
Mul = DAG.getNode(ISD::SRL, dl, MVT::v32i16, Mul,
DAG.getConstant(8, dl, MVT::v32i16));
return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
}
SDValue ALo = extract128BitVector(A, 0, DAG, dl);
SDValue BLo = extract128BitVector(B, 0, DAG, dl);
SDValue AHi = extract128BitVector(A, NumElems / 2, DAG, dl);
SDValue BHi = extract128BitVector(B, NumElems / 2, DAG, dl);
ALo = DAG.getNode(ExAVX, dl, MVT::v16i16, ALo);
BLo = DAG.getNode(ExAVX, dl, MVT::v16i16, BLo);
AHi = DAG.getNode(ExAVX, dl, MVT::v16i16, AHi);
BHi = DAG.getNode(ExAVX, dl, MVT::v16i16, BHi);
Lo = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
DAG.getNode(ISD::MUL, dl, MVT::v16i16, ALo, BLo),
DAG.getConstant(8, dl, MVT::v16i16));
Hi = DAG.getNode(ISD::SRL, dl, MVT::v16i16,
DAG.getNode(ISD::MUL, dl, MVT::v16i16, AHi, BHi),
DAG.getConstant(8, dl, MVT::v16i16));
// The ymm variant of PACKUS treats the 128-bit lanes separately, so before
// using PACKUS we need to permute the inputs to the correct lo/hi xmm lane.
const int LoMask[] = {0, 1, 2, 3, 4, 5, 6, 7,
16, 17, 18, 19, 20, 21, 22, 23};
const int HiMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
24, 25, 26, 27, 28, 29, 30, 31};
return DAG.getNode(X86ISD::PACKUS, dl, VT,
DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, LoMask),
DAG.getVectorShuffle(MVT::v16i16, dl, Lo, Hi, HiMask));
}
assert(VT == MVT::v16i8 && "Unexpected VT");
SDValue ExA = DAG.getNode(ExAVX, dl, MVT::v16i16, A);
SDValue ExB = DAG.getNode(ExAVX, dl, MVT::v16i16, B);
SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v16i16, ExA, ExB);
Mul = DAG.getNode(ISD::SRL, dl, MVT::v16i16, Mul,
DAG.getConstant(8, dl, MVT::v16i16));
// If we have BWI we can use the truncate instruction.
if (Subtarget.hasBWI())
return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, Mul, Lo);
Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i16, Mul, Hi);
return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
}
assert(VT == MVT::v16i8 &&
"Pre-AVX2 support only supports v16i8 multiplication");
MVT ExVT = MVT::v8i16;
unsigned ExSSE41 = ISD::MULHU == Opcode ? ISD::ZERO_EXTEND_VECTOR_INREG
: ISD::SIGN_EXTEND_VECTOR_INREG;
// Extract the lo parts and zero/sign extend to i16.
SDValue ALo, BLo;
if (Subtarget.hasSSE41()) {
ALo = DAG.getNode(ExSSE41, dl, ExVT, A);
BLo = DAG.getNode(ExSSE41, dl, ExVT, B);
} else {
const int ShufMask[] = {-1, 0, -1, 1, -1, 2, -1, 3,
-1, 4, -1, 5, -1, 6, -1, 7};
ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
ALo = DAG.getBitcast(ExVT, ALo);
BLo = DAG.getBitcast(ExVT, BLo);
ALo = DAG.getNode(ExShift, dl, ExVT, ALo, DAG.getConstant(8, dl, ExVT));
BLo = DAG.getNode(ExShift, dl, ExVT, BLo, DAG.getConstant(8, dl, ExVT));
}
// Extract the hi parts and zero/sign extend to i16.
SDValue AHi, BHi;
if (Subtarget.hasSSE41()) {
const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,
-1, -1, -1, -1, -1, -1, -1, -1};
AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
AHi = DAG.getNode(ExSSE41, dl, ExVT, AHi);
BHi = DAG.getNode(ExSSE41, dl, ExVT, BHi);
} else {
const int ShufMask[] = {-1, 8, -1, 9, -1, 10, -1, 11,
-1, 12, -1, 13, -1, 14, -1, 15};
AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);
BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);
AHi = DAG.getBitcast(ExVT, AHi);
BHi = DAG.getBitcast(ExVT, BHi);
AHi = DAG.getNode(ExShift, dl, ExVT, AHi, DAG.getConstant(8, dl, ExVT));
BHi = DAG.getNode(ExShift, dl, ExVT, BHi, DAG.getConstant(8, dl, ExVT));
}
// Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results
// and pack back to v16i8.
SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
RLo = DAG.getNode(ISD::SRL, dl, ExVT, RLo, DAG.getConstant(8, dl, ExVT));
RHi = DAG.getNode(ISD::SRL, dl, ExVT, RHi, DAG.getConstant(8, dl, ExVT));
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
assert(Subtarget.isTargetWin64() && "Unexpected target");
EVT VT = Op.getValueType();
assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
"Unexpected return type for lowering");
RTLIB::Libcall LC;
bool isSigned;
switch (Op->getOpcode()) {
default: llvm_unreachable("Unexpected request for libcall!");
case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
}
SDLoc dl(Op);
SDValue InChain = DAG.getEntryNode();
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
EVT ArgVT = Op->getOperand(i).getValueType();
assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
"Unexpected argument type for lowering");
SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
Entry.Node = StackPtr;
InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
MachinePointerInfo(), /* Alignment = */ 16);
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
Entry.Ty = PointerType::get(ArgTy,0);
Entry.IsSExt = false;
Entry.IsZExt = false;
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
getPointerTy(DAG.getDataLayout()));
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(InChain)
.setLibCallee(
getLibcallCallingConv(LC),
static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
std::move(Args))
.setInRegister()
.setSExtResult(isSigned)
.setZExtResult(!isSigned);
std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
return DAG.getBitcast(VT, CallInfo.first);
}
static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
MVT VT = Op0.getSimpleValueType();
SDLoc dl(Op);
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256()) {
unsigned Opcode = Op.getOpcode();
unsigned NumElems = VT.getVectorNumElements();
MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2);
SDValue Lo0 = extract128BitVector(Op0, 0, DAG, dl);
SDValue Lo1 = extract128BitVector(Op1, 0, DAG, dl);
SDValue Hi0 = extract128BitVector(Op0, NumElems / 2, DAG, dl);
SDValue Hi1 = extract128BitVector(Op1, NumElems / 2, DAG, dl);
SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Lo0, Lo1);
SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Hi0, Hi1);
SDValue Ops[] = {
DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)),
DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1))
};
return DAG.getMergeValues(Ops, dl);
}
assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
(VT == MVT::v8i32 && Subtarget.hasInt256()) ||
(VT == MVT::v16i32 && Subtarget.hasAVX512()));
int NumElts = VT.getVectorNumElements();
// PMULxD operations multiply each even value (starting at 0) of LHS with
// the corresponding value of RHS and produce a widened result.
// E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
//
// In other words, to have all the results, we need to perform two PMULxD:
// 1. one with the even values.
// 2. one with the odd values.
// To achieve #2, we need to place the odd values at an even position.
//
// Place the odd value at an even position (basically, shift all values 1
// step to the left):
const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1, 9, -1, 11, -1, 13, -1, 15, -1};
// <a|b|c|d> => <b|undef|d|undef>
SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0,
makeArrayRef(&Mask[0], NumElts));
// <e|f|g|h> => <f|undef|h|undef>
SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1,
makeArrayRef(&Mask[0], NumElts));
// Emit two multiplies, one for the lower 2 ints and one for the higher 2
// ints.
MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
unsigned Opcode =
(!IsSigned || !Subtarget.hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
// PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
DAG.getBitcast(MulVT, Op0),
DAG.getBitcast(MulVT, Op1)));
// PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
// => <2 x i64> <bf|dh>
SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
DAG.getBitcast(MulVT, Odd0),
DAG.getBitcast(MulVT, Odd1)));
// Shuffle it back into the right order.
SmallVector<int, 16> HighMask(NumElts);
SmallVector<int, 16> LowMask(NumElts);
for (int i = 0; i != NumElts; ++i) {
HighMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
LowMask[i] = (i / 2) * 2 + ((i % 2) * NumElts);
}
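// E.g. for NumElts == 4 this builds LowMask == {0, 4, 2, 6} and
// HighMask == {1, 5, 3, 7}: viewing Mul1 as <lo(ae), hi(ae), lo(cg), hi(cg)>
// and Mul2 as <lo(bf), hi(bf), lo(dh), hi(dh)>, the shuffles gather
// <lo(ae), lo(bf), lo(cg), lo(dh)> and <hi(ae), hi(bf), hi(cg), hi(dh)>.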
SDValue Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
SDValue Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
// If we have a signed multiply but no PMULDQ, fix up the high parts of an
// unsigned multiply.
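// The fixup uses the identity mulhs(a, b) == mulhu(a, b) - (a < 0 ? b : 0)
// - (b < 0 ? a : 0); the arithmetic shifts by 31 below materialize the two
// conditional terms as AND masks.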
if (IsSigned && !Subtarget.hasSSE41()) {
SDValue ShAmt = DAG.getConstant(
31, dl,
DAG.getTargetLoweringInfo().getShiftAmountTy(VT, DAG.getDataLayout()));
SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
}
// The first result of MUL_LOHI is actually the low value, followed by the
// high value.
SDValue Ops[] = {Lows, Highs};
return DAG.getMergeValues(Ops, dl);
}
// Return true if the required (according to Opcode) shift-imm form is natively
// supported by the Subtarget
static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
if (VT.getScalarSizeInBits() < 16)
return false;
if (VT.is512BitVector() && Subtarget.hasAVX512() &&
(VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
return true;
bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
(VT.is256BitVector() && Subtarget.hasInt256());
bool AShift = LShift && (Subtarget.hasAVX512() ||
(VT != MVT::v2i64 && VT != MVT::v4i64));
return (Opcode == ISD::SRA) ? AShift : LShift;
}
// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}
// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget
static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
unsigned Opcode) {
if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
return false;
// vXi16 is supported only on AVX-512 with BWI.
if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
return false;
if (Subtarget.hasAVX512())
return true;
bool LShift = VT.is128BitVector() || VT.is256BitVector();
bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
return (Opcode == ISD::SRA) ? AShift : LShift;
}
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
unsigned X86Opc = (Op.getOpcode() == ISD::SHL) ? X86ISD::VSHLI :
(Op.getOpcode() == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
SDValue Ex = DAG.getBitcast(ExVT, R);
// ashr(R, 63) === cmp_slt(R, 0)
if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
"Unsupported PCMPGT op");
return DAG.getNode(X86ISD::PCMPGT, dl, VT,
getZeroVector(VT, Subtarget, DAG, dl), R);
}
if (ShiftAmt >= 32) {
// Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
SDValue Upper =
getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
ShiftAmt - 32, DAG);
if (VT == MVT::v2i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
if (VT == MVT::v4i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
{9, 1, 11, 3, 13, 5, 15, 7});
} else {
// SRA upper i32, SHL whole i64 and select lower i32.
SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
ShiftAmt, DAG);
SDValue Lower =
getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
Lower = DAG.getBitcast(ExVT, Lower);
if (VT == MVT::v2i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
if (VT == MVT::v4i64)
Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
{8, 1, 10, 3, 12, 5, 14, 7});
}
return DAG.getBitcast(VT, Ex);
};
// Optimize shl/srl/sra with constant shift amount.
if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
uint64_t ShiftAmt = ShiftConst->getZExtValue();
if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
// i64 SRA needs to be performed as partial shifts.
if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
(Subtarget.hasInt256() && VT == MVT::v4i64)) &&
Op.getOpcode() == ISD::SRA)
return ArithmeticShiftRight64(ShiftAmt);
if (VT == MVT::v16i8 ||
(Subtarget.hasInt256() && VT == MVT::v32i8) ||
VT == MVT::v64i8) {
unsigned NumElts = VT.getVectorNumElements();
MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
// Simple i8 add case
if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
return DAG.getNode(ISD::ADD, dl, VT, R, R);
// ashr(R, 7) === cmp_slt(R, 0)
if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
if (VT.is512BitVector()) {
assert(VT == MVT::v64i8 && "Unexpected element type!");
SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R,
ISD::SETGT);
return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
}
return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
}
// XOP can shift v16i8 directly instead of as shift v8i16 + mask.
if (VT == MVT::v16i8 && Subtarget.hasXOP())
return SDValue();
if (Op.getOpcode() == ISD::SHL) {
// Make a large shift.
SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT,
R, ShiftAmt, DAG);
SHL = DAG.getBitcast(VT, SHL);
// Zero out the rightmost bits.
return DAG.getNode(ISD::AND, dl, VT, SHL,
DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT));
}
if (Op.getOpcode() == ISD::SRL) {
// Make a large shift.
SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT,
R, ShiftAmt, DAG);
SRL = DAG.getBitcast(VT, SRL);
// Zero out the leftmost bits.
return DAG.getNode(ISD::AND, dl, VT, SRL,
DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
}
if (Op.getOpcode() == ISD::SRA) {
// ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
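// E.g. R = 0x80 (-128), ShiftAmt = 1: lshr gives 0x40, Mask == 0x40,
// xor gives 0x00 and the sub yields 0xC0 == -64 == ashr(-128, 1).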
SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
return Res;
}
llvm_unreachable("Unknown shift opcode.");
}
}
}
// Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
// TODO: Replace constant extraction with getTargetConstantBitsFromNode.
if (!Subtarget.hasXOP() &&
(VT == MVT::v2i64 || (Subtarget.hasInt256() && VT == MVT::v4i64) ||
(Subtarget.hasAVX512() && VT == MVT::v8i64))) {
// AVX1 targets may be extracting a 128-bit vector from a 256-bit constant.
unsigned SubVectorScale = 1;
if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
SubVectorScale =
Amt.getOperand(0).getValueSizeInBits() / Amt.getValueSizeInBits();
Amt = Amt.getOperand(0);
}
// Peek through any splat that was introduced for i64 shift vectorization.
int SplatIndex = -1;
if (ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt.getNode()))
if (SVN->isSplat()) {
SplatIndex = SVN->getSplatIndex();
Amt = Amt.getOperand(0);
assert(SplatIndex < (int)VT.getVectorNumElements() &&
"Splat shuffle referencing second operand");
}
if (Amt.getOpcode() != ISD::BITCAST ||
Amt.getOperand(0).getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
Amt = Amt.getOperand(0);
unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
(SubVectorScale * VT.getVectorNumElements());
unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
uint64_t ShiftAmt = 0;
unsigned BaseOp = (SplatIndex < 0 ? 0 : SplatIndex * Ratio);
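// Each of the Ratio build_vector elements covers 64/Ratio bits of the
// 64-bit shift amount, so element i lands at bit offset
// i * (1 << (6 - RatioInLog2)) == i * (64/Ratio); e.g. a v4i32 amount on
// v2i64 has Ratio == 2 and its two i32 halves land at bits 0 and 32.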
for (unsigned i = 0; i != Ratio; ++i) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + BaseOp));
if (!C)
return SDValue();
// 6 == Log2(64)
ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
}
// Check remaining shift amounts (if not a splat).
if (SplatIndex < 0) {
for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
uint64_t ShAmt = 0;
for (unsigned j = 0; j != Ratio; ++j) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
if (!C)
return SDValue();
// 6 == Log2(64)
ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
}
if (ShAmt != ShiftAmt)
return SDValue();
}
}
if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
if (Op.getOpcode() == ISD::SRA)
return ArithmeticShiftRight64(ShiftAmt);
}
return SDValue();
}
// Determine if V is a splat value, and return the scalar.
static SDValue IsSplatValue(MVT VT, SDValue V, const SDLoc &dl,
SelectionDAG &DAG, const X86Subtarget &Subtarget,
unsigned Opcode) {
V = peekThroughEXTRACT_SUBVECTORs(V);
// Check if this is a splat build_vector node.
if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V)) {
SDValue SplatAmt = BV->getSplatValue();
if (SplatAmt && SplatAmt.isUndef())
return SDValue();
return SplatAmt;
}
// Check for SUB(SPLAT_BV, SPLAT) cases from rotate patterns.
if (V.getOpcode() == ISD::SUB &&
!SupportedVectorVarShift(VT, Subtarget, Opcode)) {
SDValue LHS = peekThroughEXTRACT_SUBVECTORs(V.getOperand(0));
SDValue RHS = peekThroughEXTRACT_SUBVECTORs(V.getOperand(1));
// Ensure that the corresponding splat BV element is not UNDEF.
BitVector UndefElts;
BuildVectorSDNode *BV0 = dyn_cast<BuildVectorSDNode>(LHS);
ShuffleVectorSDNode *SVN1 = dyn_cast<ShuffleVectorSDNode>(RHS);
if (BV0 && SVN1 && BV0->getSplatValue(&UndefElts) && SVN1->isSplat()) {
unsigned SplatIdx = (unsigned)SVN1->getSplatIndex();
if (!UndefElts[SplatIdx])
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
VT.getVectorElementType(), V,
DAG.getIntPtrConstant(SplatIdx, dl));
}
}
// Check if this is a shuffle node doing a splat.
ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(V);
if (!SVN || !SVN->isSplat())
return SDValue();
unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
SDValue InVec = V.getOperand(0);
if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
assert((SplatIdx < VT.getVectorNumElements()) &&
"Unexpected shuffle index found!");
return InVec.getOperand(SplatIdx);
} else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(InVec.getOperand(2)))
if (C->getZExtValue() == SplatIdx)
return InVec.getOperand(1);
}
// Avoid introducing an extract element from a shuffle.
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
VT.getVectorElementType(), InVec,
DAG.getIntPtrConstant(SplatIdx, dl));
}
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
unsigned Opcode = Op.getOpcode();
unsigned X86OpcI = (Opcode == ISD::SHL) ? X86ISD::VSHLI :
(Opcode == ISD::SRL) ? X86ISD::VSRLI : X86ISD::VSRAI;
unsigned X86OpcV = (Opcode == ISD::SHL) ? X86ISD::VSHL :
(Opcode == ISD::SRL) ? X86ISD::VSRL : X86ISD::VSRA;
Amt = peekThroughEXTRACT_SUBVECTORs(Amt);
if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
if (SDValue BaseShAmt = IsSplatValue(VT, Amt, dl, DAG, Subtarget, Opcode)) {
MVT EltVT = VT.getVectorElementType();
assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
else if (EltVT.bitsLT(MVT::i32))
BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
}
}
// Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
- if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
+ if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
Amt = Amt.getOperand(0);
- unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
- VT.getVectorNumElements();
+ unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
std::vector<SDValue> Vals(Ratio);
for (unsigned i = 0; i != Ratio; ++i)
Vals[i] = Amt.getOperand(i);
- for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
+ for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
for (unsigned j = 0; j != Ratio; ++j)
if (Vals[j] != Amt.getOperand(i + j))
return SDValue();
}
if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
}
return SDValue();
}
// Convert a shift/rotate left amount to a multiplication scale factor.
static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Amt.getSimpleValueType();
if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
(Subtarget.hasInt256() && VT == MVT::v16i16) ||
(!Subtarget.hasAVX512() && VT == MVT::v16i8)))
return SDValue();
if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
SmallVector<SDValue, 8> Elts;
MVT SVT = VT.getVectorElementType();
unsigned SVTBits = SVT.getSizeInBits();
APInt One(SVTBits, 1);
unsigned NumElems = VT.getVectorNumElements();
for (unsigned i = 0; i != NumElems; ++i) {
SDValue Op = Amt->getOperand(i);
if (Op->isUndef()) {
Elts.push_back(Op);
continue;
}
ConstantSDNode *ND = cast<ConstantSDNode>(Op);
APInt C(SVTBits, ND->getAPIntValue().getZExtValue());
uint64_t ShAmt = C.getZExtValue();
if (ShAmt >= SVTBits) {
Elts.push_back(DAG.getUNDEF(SVT));
continue;
}
Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
}
return DAG.getBuildVector(VT, dl, Elts);
}
// If the target doesn't support variable shifts, use either FP conversion
// or integer multiplication to avoid shifting each element individually.
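// The v4i32 path below builds 2^Amt directly in float format: Amt << 23
// places Amt in the f32 exponent field and adding 0x3f800000 (1.0f) biases
// it, e.g. Amt == 3 gives 0x41000000 == 8.0f, and fp_to_sint recovers
// 1 << 3.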
if (VT == MVT::v4i32) {
Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
DAG.getConstant(0x3f800000U, dl, VT));
Amt = DAG.getBitcast(MVT::v4f32, Amt);
return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
}
// AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
if (Subtarget.hasSSE41())
return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
DAG.getBitcast(VT, Hi),
{0, 2, 4, 6, 8, 10, 12, 14});
}
return SDValue();
}
static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDLoc dl(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
assert(VT.isVector() && "Custom lowering only for vector shifts!");
assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
return V;
if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
return V;
if (SupportedVectorVarShift(VT, Subtarget, Op.getOpcode()))
return Op;
// XOP has 128-bit variable logical/arithmetic shifts.
// +ve/-ve Amt = shift left/right.
if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
VT == MVT::v8i16 || VT == MVT::v16i8)) {
if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SRA) {
SDValue Zero = DAG.getConstant(0, dl, VT);
Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
}
if (Op.getOpcode() == ISD::SHL || Op.getOpcode() == ISD::SRL)
return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
if (Op.getOpcode() == ISD::SRA)
return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
}
// 2i64 vector logical shifts can efficiently avoid scalarization - do the
// shifts per-lane and then shuffle the partial results back together.
if (VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) {
// Splat the shift amounts so the scalar shifts above will catch it.
SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
SDValue R0 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt0);
SDValue R1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Amt1);
return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
}
// i64 vector arithmetic shift can be emulated with the transform:
// M = lshr(SIGN_MASK, Amt)
// ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
Op.getOpcode() == ISD::SRA) {
SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
R = DAG.getNode(ISD::XOR, dl, VT, R, M);
R = DAG.getNode(ISD::SUB, dl, VT, R, M);
return R;
}
// If possible, lower this shift as a sequence of two shifts by
// constant plus a BLENDing shuffle instead of scalarizing it.
// Example:
// (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
//
// Could be rewritten as:
// (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
//
// The advantage is that the two shifts from the example would be
// lowered as X86ISD::VSRLI nodes in parallel before blending.
if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
(VT == MVT::v16i16 && Subtarget.hasInt256()))) {
SDValue Amt1, Amt2;
unsigned NumElts = VT.getVectorNumElements();
SmallVector<int, 8> ShuffleMask;
for (unsigned i = 0; i != NumElts; ++i) {
SDValue A = Amt->getOperand(i);
if (A.isUndef()) {
ShuffleMask.push_back(SM_SentinelUndef);
continue;
}
if (!Amt1 || Amt1 == A) {
ShuffleMask.push_back(i);
Amt1 = A;
continue;
}
if (!Amt2 || Amt2 == A) {
ShuffleMask.push_back(i + NumElts);
Amt2 = A;
continue;
}
break;
}
// Only perform this blend if we can perform it without loading a mask.
if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
isa<ConstantSDNode>(Amt1) && isa<ConstantSDNode>(Amt2) &&
(VT != MVT::v16i16 ||
is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
(VT == MVT::v4i32 || Subtarget.hasSSE41() ||
Op.getOpcode() != ISD::SHL || canWidenShuffleElements(ShuffleMask))) {
SDValue Splat1 =
DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), dl, VT);
SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
SDValue Splat2 =
DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), dl, VT);
SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
}
}
// If possible, lower this packed shift into a vector multiply instead of
// expanding it into a sequence of scalar shifts.
if (Op.getOpcode() == ISD::SHL)
if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
// Constant ISD::SRL can be performed efficiently on vXi8/vXi16 vectors as we
// can replace it with ISD::MULHU, creating the scale factor from
// (NumEltBits - Amt).
// TODO: Improve support for the shift by zero special case.
if (Op.getOpcode() == ISD::SRL && ConstantAmt &&
((Subtarget.hasSSE41() && VT == MVT::v8i16) ||
DAG.isKnownNeverZero(Amt)) &&
(VT == MVT::v16i8 || VT == MVT::v8i16 ||
((VT == MVT::v32i8 || VT == MVT::v16i16) && Subtarget.hasInt256()))) {
SDValue EltBits = DAG.getConstant(VT.getScalarSizeInBits(), dl, VT);
SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
SDValue Zero = DAG.getConstant(0, dl, VT);
SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
return DAG.getSelect(dl, VT, ZAmt, R, Res);
}
}
// v4i32 non-uniform shifts.
// If the shift amount is constant we can shift each lane using the SSE2
// immediate shifts, else we need to zero-extend each lane to the lower i64
// and shift using the SSE2 variable shifts.
// The separate results can then be blended together.
if (VT == MVT::v4i32) {
unsigned Opc = Op.getOpcode();
SDValue Amt0, Amt1, Amt2, Amt3;
if (ConstantAmt) {
Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
} else {
// ISD::SHL is handled above but we include it here for completeness.
switch (Opc) {
default:
llvm_unreachable("Unknown target vector shift node");
case ISD::SHL:
Opc = X86ISD::VSHL;
break;
case ISD::SRL:
Opc = X86ISD::VSRL;
break;
case ISD::SRA:
Opc = X86ISD::VSRA;
break;
}
// The SSE2 shifts use the lower i64 as the same shift amount for
// all lanes and the upper i64 is ignored. On AVX we're better off
// just zero-extending, but for SSE just duplicating the top 16 bits is
// cheaper and has the same effect for out-of-range values.
if (Subtarget.hasAVX()) {
SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
} else {
SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
{4, 5, 6, 7, -1, -1, -1, -1});
Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
{0, 1, 1, 1, -1, -1, -1, -1});
Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
{2, 3, 3, 3, -1, -1, -1, -1});
Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
{0, 1, 1, 1, -1, -1, -1, -1});
Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
{2, 3, 3, 3, -1, -1, -1, -1});
}
}
SDValue R0 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt0));
SDValue R1 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt1));
SDValue R2 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt2));
SDValue R3 = DAG.getNode(Opc, dl, VT, R, DAG.getBitcast(VT, Amt3));
// Merge the shifted lane results optimally with/without PBLENDW.
// TODO - ideally shuffle combining would handle this.
if (Subtarget.hasSSE41()) {
SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
}
SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
}
// It's worth extending once and using the vXi16/vXi32 shifts for smaller
// types, but without AVX512 the extra overheads to get from vXi8 to vXi32
// make the existing SSE solution better.
// NOTE: We honor the preferred vector width before promoting to 512 bits.
if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
(Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
(Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
(Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
"Unexpected vector type");
MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
unsigned ExtOpc =
Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
R = DAG.getNode(ExtOpc, dl, ExtVT, R);
Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
return DAG.getNode(ISD::TRUNCATE, dl, VT,
DAG.getNode(Op.getOpcode(), dl, ExtVT, R, Amt));
}
if (VT == MVT::v16i8 ||
(VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
(VT == MVT::v64i8 && Subtarget.hasBWI())) {
MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
unsigned ShiftOpcode = Op->getOpcode();
auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
if (VT.is512BitVector()) {
// On AVX512BW targets we make use of the fact that VSELECT lowers
// to a masked blend which selects bytes based just on the sign bit
// extracted to a mask.
MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
ISD::SETGT);
return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
} else if (Subtarget.hasSSE41()) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
}
// On pre-SSE41 targets we test for the sign bit by comparing to
// zero - a negative value will set all bits of the lanes to true
// and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue Z = getZeroVector(SelVT, Subtarget, DAG, dl);
SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
return DAG.getSelect(dl, SelVT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 5;
// We can safely do this using i16 shifts as we're only interested in
// the 3 lower bits of each byte.
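// After the << 5, bit 2 of each byte's shift amount sits in the byte's
// sign bit, which is all the selects below test; each 'a += a' then moves
// bit 1 and finally bit 0 into the sign position for the shift-by-2 and
// shift-by-1 steps.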
Amt = DAG.getBitcast(ExtVT, Amt);
Amt = DAG.getNode(ISD::SHL, dl, ExtVT, Amt, DAG.getConstant(5, dl, ExtVT));
Amt = DAG.getBitcast(VT, Amt);
if (Op->getOpcode() == ISD::SHL || Op->getOpcode() == ISD::SRL) {
// r = VSELECT(r, shift(r, 4), a);
SDValue M =
DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 2), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// return VSELECT(r, shift(r, 1), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
R = SignBitSelect(VT, Amt, M, R);
return R;
}
if (Op->getOpcode() == ISD::SRA) {
// For SRA we need to unpack each byte to the higher byte of an i16 vector
// so we can correctly sign extend. We don't care what happens to the
// lower byte.
SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), Amt);
SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), Amt);
SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, DAG.getUNDEF(VT), R);
SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, DAG.getUNDEF(VT), R);
ALo = DAG.getBitcast(ExtVT, ALo);
AHi = DAG.getBitcast(ExtVT, AHi);
RLo = DAG.getBitcast(ExtVT, RLo);
RHi = DAG.getBitcast(ExtVT, RHi);
// r = VSELECT(r, shift(r, 4), a);
SDValue MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
DAG.getConstant(4, dl, ExtVT));
SDValue MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
DAG.getConstant(4, dl, ExtVT));
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// a += a
ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
// r = VSELECT(r, shift(r, 2), a);
MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
DAG.getConstant(2, dl, ExtVT));
MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
DAG.getConstant(2, dl, ExtVT));
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// a += a
ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
// r = VSELECT(r, shift(r, 1), a);
MLo = DAG.getNode(ShiftOpcode, dl, ExtVT, RLo,
DAG.getConstant(1, dl, ExtVT));
MHi = DAG.getNode(ShiftOpcode, dl, ExtVT, RHi,
DAG.getConstant(1, dl, ExtVT));
RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
// Logical shift the result back to the lower byte, leaving a zero upper
// byte, meaning that we can safely pack with PACKUSWB.
RLo =
DAG.getNode(ISD::SRL, dl, ExtVT, RLo, DAG.getConstant(8, dl, ExtVT));
RHi =
DAG.getNode(ISD::SRL, dl, ExtVT, RHi, DAG.getConstant(8, dl, ExtVT));
return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}
}
if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
MVT ExtVT = MVT::v8i32;
SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);
SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z);
SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R);
SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R);
ALo = DAG.getBitcast(ExtVT, ALo);
AHi = DAG.getBitcast(ExtVT, AHi);
RLo = DAG.getBitcast(ExtVT, RLo);
RHi = DAG.getBitcast(ExtVT, RHi);
SDValue Lo = DAG.getNode(Op.getOpcode(), dl, ExtVT, RLo, ALo);
SDValue Hi = DAG.getNode(Op.getOpcode(), dl, ExtVT, RHi, AHi);
Lo = DAG.getNode(ISD::SRL, dl, ExtVT, Lo, DAG.getConstant(16, dl, ExtVT));
Hi = DAG.getNode(ISD::SRL, dl, ExtVT, Hi, DAG.getConstant(16, dl, ExtVT));
return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
}
if (VT == MVT::v8i16) {
unsigned ShiftOpcode = Op->getOpcode();
// If we have a constant shift amount, the non-SSE41 path is best as
// avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
bool UseSSE41 = Subtarget.hasSSE41() &&
!ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
if (UseSSE41) {
MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
V0 = DAG.getBitcast(ExtVT, V0);
V1 = DAG.getBitcast(ExtVT, V1);
Sel = DAG.getBitcast(ExtVT, Sel);
return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
}
// On pre-SSE41 targets we splat the sign bit - a negative value will
// set all bits of the lanes to true and VSELECT uses that in
// its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue C =
DAG.getNode(ISD::SRA, dl, VT, Sel, DAG.getConstant(15, dl, VT));
return DAG.getSelect(dl, VT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 12;
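// i16 shift amounts only need 4 bits, so a << 12 moves the selector bit for
// the "shift by 8" stage (bit 3) into the sign bit; as in the byte case,
// "a += a" advances to the next selector bit between stages. E.g. an amount
// of 9 (0b1001) selects the shift-by-8 and shift-by-1 stages.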
if (UseSSE41) {
// On SSE41 targets we need to replicate the shift mask in both
// bytes for PBLENDVB.
Amt = DAG.getNode(
ISD::OR, dl, VT,
DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(4, dl, VT)),
DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT)));
} else {
Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(12, dl, VT));
}
// r = VSELECT(r, shift(r, 8), a);
SDValue M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(8, dl, VT));
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 4), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(4, dl, VT));
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// r = VSELECT(r, shift(r, 2), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(2, dl, VT));
R = SignBitSelect(Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
// return VSELECT(r, shift(r, 1), a);
M = DAG.getNode(ShiftOpcode, dl, VT, R, DAG.getConstant(1, dl, VT));
R = SignBitSelect(Amt, M, R);
return R;
}
// Decompose 256-bit shifts into smaller 128-bit shifts.
if (VT.is256BitVector())
return Lower256IntArith(Op, DAG);
return SDValue();
}
static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.isVector() && "Custom lowering only for vector rotates!");
SDLoc DL(Op);
SDValue R = Op.getOperand(0);
SDValue Amt = Op.getOperand(1);
unsigned Opcode = Op.getOpcode();
unsigned EltSizeInBits = VT.getScalarSizeInBits();
if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
// Attempt to rotate by immediate.
APInt UndefElts;
SmallVector<APInt, 16> EltBits;
if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits)) {
if (!UndefElts && llvm::all_of(EltBits, [EltBits](APInt &V) {
return EltBits[0] == V;
})) {
unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
uint64_t RotateAmt = EltBits[0].urem(EltSizeInBits);
return DAG.getNode(Op, DL, VT, R,
DAG.getConstant(RotateAmt, DL, MVT::i8));
}
}
// Else, fall-back on VPROLV/VPRORV.
return Op;
}
assert((Opcode == ISD::ROTL) && "Only ROTL supported");
// XOP has 128-bit vector variable + immediate rotates.
// +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
if (Subtarget.hasXOP()) {
// Split 256-bit integers.
if (VT.is256BitVector())
return Lower256IntArith(Op, DAG);
assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
// Attempt to rotate by immediate.
if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
uint64_t RotateAmt = RotateConst->getAPIntValue().getZExtValue();
assert(RotateAmt < EltSizeInBits && "Rotation out of range");
return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
DAG.getConstant(RotateAmt, DL, MVT::i8));
}
}
// Use general rotate by variable (per-element).
return Op;
}
// Split 256-bit integers on pre-AVX2 targets.
if (VT.is256BitVector() && !Subtarget.hasAVX2())
return Lower256IntArith(Op, DAG);
assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
Subtarget.hasAVX2())) &&
"Only vXi32/vXi16/vXi8 vector rotates supported");
// Rotate by a uniform constant - expand back to shifts.
// TODO - legalizers should be able to handle this.
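// I.e. rotl(x, c) == (x << c) | (x >> (Bits - c)); e.g. for i8,
// rotl(0x96, 3) == (0x96 << 3) | (0x96 >> 5) == 0xB0 | 0x04 == 0xB4.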
if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
if (auto *RotateConst = BVAmt->getConstantSplatNode()) {
uint64_t RotateAmt = RotateConst->getAPIntValue().getZExtValue();
assert(RotateAmt < EltSizeInBits && "Rotation out of range");
if (RotateAmt == 0)
return R;
SDValue AmtR = DAG.getConstant(EltSizeInBits - RotateAmt, DL, VT);
SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
}
// Rotate by splat - expand back to shifts.
// TODO - legalizers should be able to handle this.
if ((EltSizeInBits >= 16 || Subtarget.hasBWI()) &&
IsSplatValue(VT, Amt, DL, DAG, Subtarget, Opcode)) {
SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
// v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
// the amount bit.
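// Analogous to the variable shift lowering above: each stage computes
// M = rot(R, k) as (R << k) | (R >> (8 - k)) and selects it by the
// corresponding amount bit, e.g. an amount of 5 applies rot4 then rot1.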
if (EltSizeInBits == 8) {
if (Subtarget.hasBWI()) {
SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
if (Subtarget.hasSSE41()) {
// On SSE41 targets we make use of the fact that VSELECT lowers
// to PBLENDVB which selects bytes based just on the sign bit.
V0 = DAG.getBitcast(VT, V0);
V1 = DAG.getBitcast(VT, V1);
Sel = DAG.getBitcast(VT, Sel);
return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
}
// On pre-SSE41 targets we test for the sign bit by comparing to
// zero - a negative value will set all bits of the lanes to true
// and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
SDValue Z = getZeroVector(SelVT, Subtarget, DAG, DL);
SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
return DAG.getSelect(DL, SelVT, C, V0, V1);
};
// Turn 'a' into a mask suitable for VSELECT: a = a << 5;
// We can safely do this using i16 shifts as we're only interested in
// the 3 lower bits of each byte.
Amt = DAG.getBitcast(ExtVT, Amt);
Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
Amt = DAG.getBitcast(VT, Amt);
// r = VSELECT(r, rot(r, 4), a);
SDValue M;
M = DAG.getNode(
ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
// r = VSELECT(r, rot(r, 2), a);
M = DAG.getNode(
ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
R = SignBitSelect(VT, Amt, M, R);
// a += a
Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
// return VSELECT(r, rot(r, 1), a);
M = DAG.getNode(
ISD::OR, DL, VT,
DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
return SignBitSelect(VT, Amt, M, R);
}
bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
// Best to fall back for all supported variable shifts.
// AVX2 - best to fall back for non-constants as well.
// TODO - legalizers should be able to handle this.
if (LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
}
// As with shifts, convert the rotation amount to a multiplication factor.
SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
assert(Scale && "Failed to convert ROTL amount to scale");
// v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
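// The low half of a 32-bit product x * (1 << c) is x << c and the high half
// holds the wrapped bits x >> (16 - c), so OR-ing MUL and MULHU yields the
// rotate. E.g. x = 0x1234, c = 4: MUL gives 0x2340, MULHU gives 0x0001,
// and the OR is 0x2341 == rotl16(0x1234, 4).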
if (EltSizeInBits == 16) {
SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
}
// v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
// to v2i64 results at a time. The upper 32-bits contain the wrapped bits
// that can then be OR'd with the lower 32-bits.
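// For a lane value x and amount c, the 64-bit product x * (1 << c) holds
// x << c in its low 32 bits and the wrapped bits x >> (32 - c) in its high
// 32 bits, so shuffling the low and high halves back together and OR-ing
// them completes the rotate.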
assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
static const int OddMask[] = {1, -1, 3, -1};
SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, R),
DAG.getBitcast(MVT::v2i64, Scale));
SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
DAG.getBitcast(MVT::v2i64, R13),
DAG.getBitcast(MVT::v2i64, Scale13));
Res02 = DAG.getBitcast(VT, Res02);
Res13 = DAG.getBitcast(VT, Res13);
return DAG.getNode(ISD::OR, DL, VT,
DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
}
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
// Lower the "add/sub/mul with overflow" instruction into a regular ins plus
// a "setcc" instruction that checks the overflow flag. The "brcond" lowering
// looks for this combo and may remove the "setcc" instruction if the "setcc"
// has only one use.
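// E.g. (i8 (saddo 127, 1)) becomes an X86ISD::ADD producing -128 plus
// EFLAGS, followed by a setcc on COND_O, which reads the overflow flag.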
SDNode *N = Op.getNode();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
unsigned BaseOp = 0;
X86::CondCode Cond;
SDLoc DL(Op);
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown ovf instruction!");
case ISD::SADDO:
// An add of one will be selected as an INC. Note that INC doesn't
// set CF, so we can't do this for UADDO.
if (isOneConstant(RHS)) {
BaseOp = X86ISD::INC;
Cond = X86::COND_O;
break;
}
BaseOp = X86ISD::ADD;
Cond = X86::COND_O;
break;
case ISD::UADDO:
BaseOp = X86ISD::ADD;
Cond = X86::COND_B;
break;
case ISD::SSUBO:
// A subtract of one will be selected as a DEC. Note that DEC doesn't
// set CF, so we can't do this for USUBO.
if (isOneConstant(RHS)) {
BaseOp = X86ISD::DEC;
Cond = X86::COND_O;
break;
}
BaseOp = X86ISD::SUB;
Cond = X86::COND_O;
break;
case ISD::USUBO:
BaseOp = X86ISD::SUB;
Cond = X86::COND_B;
break;
case ISD::SMULO:
BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
Cond = X86::COND_O;
break;
case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
if (N->getValueType(0) == MVT::i8) {
BaseOp = X86ISD::UMUL8;
Cond = X86::COND_O;
break;
}
SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
MVT::i32);
SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
SDValue SetCC = getSETCC(X86::COND_O, SDValue(Sum.getNode(), 2), DL, DAG);
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
}
// Also sets EFLAGS.
SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
SDValue SetCC = getSETCC(Cond, SDValue(Sum.getNode(), 1), DL, DAG);
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
unsigned OpWidth = MemType->getPrimitiveSizeInBits();
if (OpWidth == 64)
return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
else if (OpWidth == 128)
return Subtarget.hasCmpxchg16b();
else
return false;
}
bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
return needsCmpXchgNb(SI->getValueOperand()->getType());
}
// Note: this turns large loads into lock cmpxchg8b/16b.
// FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
auto PTy = cast<PointerType>(LI->getPointerOperandType());
return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
}
TargetLowering::AtomicExpansionKind
X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// If the operand is too big, we must see if cmpxchg8/16b is available
// and default to library calls otherwise.
if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
}
AtomicRMWInst::BinOp Op = AI->getOperation();
switch (Op) {
default:
llvm_unreachable("Unknown atomic operation");
case AtomicRMWInst::Xchg:
case AtomicRMWInst::Add:
case AtomicRMWInst::Sub:
// It's better to use xadd, xsub or xchg for these in all cases.
return AtomicExpansionKind::None;
case AtomicRMWInst::Or:
case AtomicRMWInst::And:
case AtomicRMWInst::Xor:
// If the atomicrmw's result isn't actually used, we can just add a "lock"
// prefix to a normal instruction for these operations.
return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
case AtomicRMWInst::Nand:
case AtomicRMWInst::Max:
case AtomicRMWInst::Min:
case AtomicRMWInst::UMax:
case AtomicRMWInst::UMin:
// These always require a non-trivial set of data operations on x86. We must
// use a cmpxchg loop.
return AtomicExpansionKind::CmpXChg;
}
}
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
Type *MemType = AI->getType();
// Accesses larger than the native width are turned into cmpxchg/libcalls, so
// there is no benefit in turning such RMWs into loads, and it is actually
// harmful as it introduces a mfence.
if (MemType->getPrimitiveSizeInBits() > NativeWidth)
return nullptr;
auto Builder = IRBuilder<>(AI);
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
auto SSID = AI->getSyncScopeID();
// We must restrict the ordering to avoid generating loads with Release or
// ReleaseAcquire orderings.
auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
auto Ptr = AI->getPointerOperand();
// Before the load we need a fence. Here is an example lifted from
// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
// is required:
// Thread 0:
// x.store(1, relaxed);
// r1 = y.fetch_add(0, release);
// Thread 1:
// y.fetch_add(42, acquire);
// r2 = x.load(relaxed);
// r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
// lowered to just a load without a fence. A mfence flushes the store buffer,
// making the optimization clearly correct.
// FIXME: it is required if isReleaseOrStronger(Order), but it is not clear
// whether it is needed otherwise; we might be able to be more aggressive on
// relaxed idempotent rmw. In practice, they do not look useful, so we don't
// try to be especially clever.
if (SSID == SyncScope::SingleThread)
// FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
// the IR level, so we must wrap it in an intrinsic.
return nullptr;
if (!Subtarget.hasMFence())
// FIXME: it might make sense to use a locked operation here but on a
// different cache-line to prevent cache-line bouncing. In practice it
// is probably a small win, and x86 processors without mfence are rare
// enough that we do not bother.
return nullptr;
Function *MFence =
llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
Builder.CreateCall(MFence, {});
// Finally we can emit the atomic load.
LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
AI->getType()->getPrimitiveSizeInBits());
Loaded->setAtomic(Order, SSID);
AI->replaceAllUsesWith(Loaded);
AI->eraseFromParent();
return Loaded;
}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc dl(Op);
AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
// The only fence that needs an instruction is a sequentially-consistent
// cross-thread fence.
if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
FenceSSID == SyncScope::System) {
if (Subtarget.hasMFence())
return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
SDValue Chain = Op.getOperand(0);
SDValue Zero = DAG.getConstant(0, dl, MVT::i32);
SDValue Ops[] = {
DAG.getRegister(X86::ESP, MVT::i32), // Base
DAG.getTargetConstant(1, dl, MVT::i8), // Scale
DAG.getRegister(0, MVT::i32), // Index
DAG.getTargetConstant(0, dl, MVT::i32), // Disp
DAG.getRegister(0, MVT::i32), // Segment.
Zero,
Chain
};
SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
return SDValue(Res, 0);
}
// MEMBARRIER is a compiler barrier; it codegens to a no-op.
return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT T = Op.getSimpleValueType();
SDLoc DL(Op);
unsigned Reg = 0;
unsigned size = 0;
switch(T.SimpleTy) {
default: llvm_unreachable("Invalid value type!");
case MVT::i8: Reg = X86::AL; size = 1; break;
case MVT::i16: Reg = X86::AX; size = 2; break;
case MVT::i32: Reg = X86::EAX; size = 4; break;
case MVT::i64:
assert(Subtarget.is64Bit() && "Node not type legal!");
Reg = X86::RAX; size = 8;
break;
}
SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
Op.getOperand(2), SDValue());
SDValue Ops[] = { cpIn.getValue(0),
Op.getOperand(1),
Op.getOperand(3),
DAG.getTargetConstant(size, DL, MVT::i8),
cpIn.getValue(1) };
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
Ops, T, MMO);
SDValue cpOut =
DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
MVT::i32, cpOut.getValue(2));
SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
return SDValue();
}
// Create MOVMSKB, taking into account whether we need to split for AVX1.
static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT InVT = V.getSimpleValueType();
if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
DAG.getConstant(16, DL, MVT::i8));
return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
}
return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
}
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue Src = Op.getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
MVT DstVT = Op.getSimpleValueType();
// Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
// half to v32i1 and concatenating the result.
if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
assert(Subtarget.hasBWI() && "Expected BWI target");
SDLoc dl(Op);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
DAG.getIntPtrConstant(0, dl));
Lo = DAG.getBitcast(MVT::v32i1, Lo);
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
DAG.getIntPtrConstant(1, dl));
Hi = DAG.getBitcast(MVT::v32i1, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
}
// Custom splitting for BWI types when AVX512F is available but BWI isn't.
if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
SDLoc dl(Op);
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
EVT CastVT = MVT::getVectorVT(DstVT.getVectorElementType(),
DstVT.getVectorNumElements() / 2);
Lo = DAG.getBitcast(CastVT, Lo);
Hi = DAG.getBitcast(CastVT, Hi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
}
// Use MOVMSK for vector to scalar conversion to prevent scalarization.
if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
SDLoc DL(Op);
SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
V = getPMOVMSKB(DL, V, DAG, Subtarget);
return DAG.getZExtOrTrunc(V, DL, DstVT);
}
if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
SrcVT == MVT::i64) {
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
if (DstVT != MVT::f64)
// This conversion needs to be expanded.
return SDValue();
SmallVector<SDValue, 16> Elts;
SDLoc dl(Op);
unsigned NumElts;
MVT SVT;
if (SrcVT.isVector()) {
NumElts = SrcVT.getVectorNumElements();
SVT = SrcVT.getVectorElementType();
// Widen the input vector in the case of MVT::v2i32.
// Example: from MVT::v2i32 to MVT::v4i32.
for (unsigned i = 0, e = NumElts; i != e; ++i)
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, Src,
DAG.getIntPtrConstant(i, dl)));
} else {
assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
"Unexpected source type in LowerBITCAST");
Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
DAG.getIntPtrConstant(0, dl)));
Elts.push_back(DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
DAG.getIntPtrConstant(1, dl)));
NumElts = 2;
SVT = MVT::i32;
}
// Explicitly mark the extra elements as Undef.
Elts.append(NumElts, DAG.getUNDEF(SVT));
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
SDValue BV = DAG.getBuildVector(NewVT, dl, Elts);
SDValue ToV2F64 = DAG.getBitcast(MVT::v2f64, BV);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
DAG.getIntPtrConstant(0, dl));
}
assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() &&
Subtarget.hasMMX() && "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
"Unexpected custom BITCAST");
// i64 <=> MMX conversions are Legal.
if (SrcVT==MVT::i64 && DstVT.isVector())
return Op;
if (DstVT==MVT::i64 && SrcVT.isVector())
return Op;
// MMX <=> MMX conversions are Legal.
if (SrcVT.isVector() && DstVT.isVector())
return Op;
// All other conversions need to be expanded.
return SDValue();
}
/// Compute the horizontal sum of bytes in V for the elements of VT.
///
/// Requires V to be a byte vector and VT to be an integer vector type with
/// wider elements than V's type. The width of the elements of VT determines
/// how many bytes of V are summed horizontally to produce each element of the
/// result.
static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDLoc DL(V);
MVT ByteVecVT = V.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
"Expected value to have byte element type.");
assert(EltVT != MVT::i8 &&
"Horizontal byte sum only makes sense for wider elements!");
unsigned VecSize = VT.getSizeInBits();
assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
// The PSADBW instruction horizontally adds all bytes and leaves the result
// in i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
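// E.g. psadbw(v, 0) on one 8-byte half {1,2,0,1,3,0,1,0} sums the per-byte
// pop counts into the i64 lane: 1+2+0+1+3+0+1+0 = 8.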
if (EltVT == MVT::i64) {
SDValue Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
return DAG.getBitcast(VT, V);
}
if (EltVT == MVT::i32) {
// We unpack the low half and high half into i32s interleaved with zeros so
// that we can use PSADBW to horizontally sum them. The most useful part of
// this is that it lines up the results of two PSADBW instructions to be
// two v2i64 vectors which concatenated are the 4 population counts. We can
// then use PACKUSWB to shrink and concatenate them into a v4i32 again.
SDValue Zeros = getZeroVector(VT, Subtarget, DAG, DL);
SDValue V32 = DAG.getBitcast(VT, V);
SDValue Low = DAG.getNode(X86ISD::UNPCKL, DL, VT, V32, Zeros);
SDValue High = DAG.getNode(X86ISD::UNPCKH, DL, VT, V32, Zeros);
// Do the horizontal sums into two v2i64s.
Zeros = getZeroVector(ByteVecVT, Subtarget, DAG, DL);
MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
DAG.getBitcast(ByteVecVT, Low), Zeros);
High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
DAG.getBitcast(ByteVecVT, High), Zeros);
// Merge them together.
MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
DAG.getBitcast(ShortVecVT, Low),
DAG.getBitcast(ShortVecVT, High));
return DAG.getBitcast(VT, V);
}
// The only element type left is i16.
assert(EltVT == MVT::i16 && "Unknown how to handle type");
// To obtain pop count for each i16 element starting from the pop count for
// i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
// right by 8. It is important to shift as i16s, as an i8 vector shift isn't
// directly supported.
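// With per-byte counts [h][l] in each i16 lane: SHL-by-8 gives [l][0], the
// i8 add gives [l+h][l], and SRL-by-8 leaves [0][l+h], the i16 pop count.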
SDValue ShifterV = DAG.getConstant(8, DL, VT);
SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
DAG.getBitcast(ByteVecVT, V));
return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
}
static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
MVT EltVT = VT.getVectorElementType();
unsigned VecSize = VT.getSizeInBits();
// Implement a lookup table in register by using an algorithm based on:
// http://wm.ite.pl/articles/sse-popcount.html
//
// The general idea is that every lower byte nibble in the input vector is an
// index into an in-register pre-computed pop count table. We then split up
// the input vector into two new ones: (1) a vector with only the
// shifted-right higher nibbles for each byte and (2) a vector with the lower
// nibbles (and masked out higher ones) for each byte. PSHUFB is used
// separately with both to index the in-register table. Next, both are added
// and the result is an i8 vector where each element contains the pop count
// for its input byte.
//
// To obtain the pop count for elements != i8, we follow up with the same
// approach and use additional tricks as described below.
//
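// For example, byte 0xB3 (0b10110011): LUT[0xB] = 3 for the high nibble and
// LUT[0x3] = 2 for the low nibble, giving the pop count 3 + 2 = 5.
//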
const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
/* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
/* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
/* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
int NumByteElts = VecSize / 8;
MVT ByteVecVT = MVT::getVectorVT(MVT::i8, NumByteElts);
SDValue In = DAG.getBitcast(ByteVecVT, Op);
SmallVector<SDValue, 64> LUTVec;
for (int i = 0; i < NumByteElts; ++i)
LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
SDValue InRegLUT = DAG.getBuildVector(ByteVecVT, DL, LUTVec);
SDValue M0F = DAG.getConstant(0x0F, DL, ByteVecVT);
// High nibbles
SDValue FourV = DAG.getConstant(4, DL, ByteVecVT);
SDValue HighNibbles = DAG.getNode(ISD::SRL, DL, ByteVecVT, In, FourV);
// Low nibbles
SDValue LowNibbles = DAG.getNode(ISD::AND, DL, ByteVecVT, In, M0F);
// The input vector is used as the shuffle mask that indexes elements into
// the LUT. After counting low and high nibbles, add the two results to
// obtain the final pop count per i8 element.
SDValue HighPopCnt =
DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, HighNibbles);
SDValue LowPopCnt =
DAG.getNode(X86ISD::PSHUFB, DL, ByteVecVT, InRegLUT, LowNibbles);
SDValue PopCnt = DAG.getNode(ISD::ADD, DL, ByteVecVT, HighPopCnt, LowPopCnt);
if (EltVT == MVT::i8)
return PopCnt;
return LowerHorizontalByteSum(PopCnt, VT, Subtarget, DAG);
}
static SDValue LowerVectorCTPOPBitmath(SDValue Op, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert(VT.is128BitVector() &&
"Only 128-bit vector bitmath lowering supported.");
int VecSize = VT.getSizeInBits();
MVT EltVT = VT.getVectorElementType();
int Len = EltVT.getSizeInBits();
// This is the vectorized version of the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// with a minor tweak to use a series of adds + shifts instead of vector
// multiplications. Implemented for all integer vector types. We only use
// this when we don't have SSSE3 which allows a LUT-based lowering that is
// much faster, even faster than using native popcnt instructions.
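// Worked example for one byte v = 0xB3 (pop count 5):
//   v - ((v >> 1) & 0x55) = 0xB3 - 0x51 = 0x62 (2-bit field counts 1,2,0,2)
//   (v & 0x33) + ((v >> 2) & 0x33) = 0x22 + 0x10 = 0x32 (nibble counts 3,2)
//   (v + (v >> 4)) & 0x0F = 0x35 & 0x0F = 0x05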
auto GetShift = [&](unsigned OpCode, SDValue V, int Shifter) {
MVT VT = V.getSimpleValueType();
SDValue ShifterV = DAG.getConstant(Shifter, DL, VT);
return DAG.getNode(OpCode, DL, VT, V, ShifterV);
};
auto GetMask = [&](SDValue V, APInt Mask) {
MVT VT = V.getSimpleValueType();
SDValue MaskV = DAG.getConstant(Mask, DL, VT);
return DAG.getNode(ISD::AND, DL, VT, V, MaskV);
};
// We don't want to incur the implicit masks required to SRL vNi8 vectors on
// x86, so set the SRL type to have elements at least i16 wide. This is
// correct because all of our SRLs are followed immediately by a mask anyway
// that handles any bits that sneak into the high bits of the byte elements.
MVT SrlVT = Len > 8 ? VT : MVT::getVectorVT(MVT::i16, VecSize / 16);
SDValue V = Op;
// v = v - ((v >> 1) & 0x55555555...)
SDValue Srl =
DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 1));
SDValue And = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x55)));
V = DAG.getNode(ISD::SUB, DL, VT, V, And);
// v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
SDValue AndLHS = GetMask(V, APInt::getSplat(Len, APInt(8, 0x33)));
Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 2));
SDValue AndRHS = GetMask(Srl, APInt::getSplat(Len, APInt(8, 0x33)));
V = DAG.getNode(ISD::ADD, DL, VT, AndLHS, AndRHS);
// v = (v + (v >> 4)) & 0x0F0F0F0F...
Srl = DAG.getBitcast(VT, GetShift(ISD::SRL, DAG.getBitcast(SrlVT, V), 4));
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, V, Srl);
V = GetMask(Add, APInt::getSplat(Len, APInt(8, 0x0F)));
// At this point, V contains the byte-wise population count, and we are
// merely doing a horizontal sum if necessary to get the wider element
// counts.
if (EltVT == MVT::i8)
return V;
return LowerHorizontalByteSum(
DAG.getBitcast(MVT::getVectorVT(MVT::i8, VecSize / 8), V), VT, Subtarget,
DAG);
}
// Please ensure that any codegen change from LowerVectorCTPOP is reflected in
// updated cost models in X86TTIImpl::getIntrinsicInstrCost.
static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
"Unknown CTPOP type to handle");
SDLoc DL(Op.getNode());
SDValue Op0 = Op.getOperand(0);
// TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
if (Subtarget.hasVPOPCNTDQ()) {
unsigned NumElems = VT.getVectorNumElements();
assert((VT.getVectorElementType() == MVT::i8 ||
VT.getVectorElementType() == MVT::i16) && "Unexpected type");
if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
}
}
if (!Subtarget.hasSSSE3()) {
// We can't use the fast LUT approach, so fall back on vectorized bitmath.
assert(VT.is128BitVector() && "Only 128-bit vectors supported in SSE!");
return LowerVectorCTPOPBitmath(Op0, DL, Subtarget, DAG);
}
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntUnary(Op, DAG);
// Decompose 512-bit ops into smaller 256-bit ops.
if (VT.is512BitVector() && !Subtarget.hasBWI())
return Lower512IntUnary(Op, DAG);
return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
}
static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Op.getSimpleValueType().isVector() &&
"We only do custom lowering for vector population count.");
return LowerVectorCTPOP(Op, Subtarget, DAG);
}
static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
SDValue In = Op.getOperand(0);
SDLoc DL(Op);
// For scalars, it's still beneficial to transfer to/from the SIMD unit to
// perform the BITREVERSE.
if (!VT.isVector()) {
MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
DAG.getIntPtrConstant(0, DL));
}
int NumElts = VT.getVectorNumElements();
int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
// Decompose 256-bit ops into smaller 128-bit ops.
if (VT.is256BitVector())
return Lower256IntUnary(Op, DAG);
assert(VT.is128BitVector() &&
"Only 128-bit vector bitreverse lowering supported.");
// VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
// perform the BSWAP in the shuffle.
// It's best to shuffle using the second operand, as this will implicitly
// allow memory folding for multiple vectors.
SmallVector<SDValue, 16> MaskElts;
for (int i = 0; i != NumElts; ++i) {
for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
int PermuteByte = SourceByte | (2 << 5);
MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
}
}
SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
SDValue Res = DAG.getBitcast(MVT::v16i8, In);
Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
Res, Mask);
return DAG.getBitcast(VT, Res);
}
static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
if (Subtarget.hasXOP() && !VT.is512BitVector())
return LowerBITREVERSE_XOP(Op, DAG);
assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
SDValue In = Op.getOperand(0);
SDLoc DL(Op);
unsigned NumElts = VT.getVectorNumElements();
assert(VT.getScalarType() == MVT::i8 &&
"Only byte vector BITREVERSE supported");
// Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
if (VT.is256BitVector() && !Subtarget.hasInt256())
return Lower256IntUnary(Op, DAG);
// Perform BITREVERSE using PSHUFB lookups. Each byte is split into
// two nibbles and a PSHUFB lookup to find the bitreverse of each
// 0-15 value (moved to the other nibble).
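// E.g. for the byte 0x2C (0b00101100): LoLUT[0xC] = 0x30 reverses the low
// nibble into the high nibble, HiLUT[0x2] = 0x04 reverses the high nibble
// into the low nibble, and OR-ing gives 0x34 (0b00110100), the bit reverse.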
SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
const int LoLUT[16] = {
/* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
/* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
/* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
/* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
const int HiLUT[16] = {
/* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
/* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
/* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
/* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
for (unsigned i = 0; i < NumElts; ++i) {
LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
}
SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
}
static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
bool AllowIncDec = true) {
unsigned NewOpc = 0;
switch (N->getOpcode()) {
case ISD::ATOMIC_LOAD_ADD:
NewOpc = X86ISD::LADD;
break;
case ISD::ATOMIC_LOAD_SUB:
NewOpc = X86ISD::LSUB;
break;
case ISD::ATOMIC_LOAD_OR:
NewOpc = X86ISD::LOR;
break;
case ISD::ATOMIC_LOAD_XOR:
NewOpc = X86ISD::LXOR;
break;
case ISD::ATOMIC_LOAD_AND:
NewOpc = X86ISD::LAND;
break;
default:
llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
}
MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
// Convert to inc/dec if they aren't slow or we are optimizing for size.
if (AllowIncDec && (!Subtarget.slowIncDec() ||
DAG.getMachineFunction().getFunction().optForSize())) {
if ((NewOpc == X86ISD::LADD && C->isOne()) ||
(NewOpc == X86ISD::LSUB && C->isAllOnesValue()))
return DAG.getMemIntrinsicNode(X86ISD::LINC, SDLoc(N),
DAG.getVTList(MVT::i32, MVT::Other),
{N->getOperand(0), N->getOperand(1)},
/*MemVT=*/N->getSimpleValueType(0), MMO);
if ((NewOpc == X86ISD::LSUB && C->isOne()) ||
(NewOpc == X86ISD::LADD && C->isAllOnesValue()))
return DAG.getMemIntrinsicNode(X86ISD::LDEC, SDLoc(N),
DAG.getVTList(MVT::i32, MVT::Other),
{N->getOperand(0), N->getOperand(1)},
/*MemVT=*/N->getSimpleValueType(0), MMO);
}
}
return DAG.getMemIntrinsicNode(
NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
{N->getOperand(0), N->getOperand(1), N->getOperand(2)},
/*MemVT=*/N->getSimpleValueType(0), MMO);
}
/// Lower atomic_load_ops into LOCK-prefixed operations.
static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Chain = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
unsigned Opc = N->getOpcode();
MVT VT = N->getSimpleValueType(0);
SDLoc DL(N);
// We can lower atomic_load_add into LXADD. However, any other atomicrmw op
// can only be lowered when the result is unused. They should have already
// been transformed into a cmpxchg loop in AtomicExpand.
if (N->hasAnyUseOfValue(0)) {
// Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
// select LXADD if LOCK_SUB can't be selected.
if (Opc == ISD::ATOMIC_LOAD_SUB) {
AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
RHS, AN->getMemOperand());
}
assert(Opc == ISD::ATOMIC_LOAD_ADD &&
"Used AtomicRMW ops other than Add should have been expanded!");
return N;
}
SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
// RAUW the chain, but don't worry about the result, as it's unused.
assert(!N->hasAnyUseOfValue(0));
DAG.ReplaceAllUsesOfValueWith(N.getValue(1), LockOp.getValue(1));
return SDValue();
}
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
SDNode *Node = Op.getNode();
SDLoc dl(Node);
EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
// Convert seq_cst store -> xchg
// Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
// FIXME: On 32-bit, store -> fist or movq would be more efficient
// (The only way to get a 16-byte store is cmpxchg16b)
// FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
if (cast<AtomicSDNode>(Node)->getOrdering() ==
AtomicOrdering::SequentiallyConsistent ||
!DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
cast<AtomicSDNode>(Node)->getMemoryVT(),
Node->getOperand(0),
Node->getOperand(1), Node->getOperand(2),
cast<AtomicSDNode>(Node)->getMemOperand());
return Swap.getValue(1);
}
// Other atomic stores have a simple pattern.
return Op;
}
static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
SDNode *N = Op.getNode();
MVT VT = N->getSimpleValueType(0);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
SDLoc DL(N);
// Set the carry flag.
SDValue Carry = Op.getOperand(2);
EVT CarryVT = Carry.getValueType();
APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
Carry, DAG.getConstant(NegOne, DL, CarryVT));
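// Adding all-ones to the incoming carry value produces a carry-out exactly
// when that value is non-zero, so this materializes the boolean carry
// operand in EFLAGS.CF for the ADC/SBB below.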
unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
Op.getOperand(1), Carry.getValue(1));
SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
if (N->getValueType(1) == MVT::i1)
SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
// For MacOSX, we want to call an alternative entry point: __sincos_stret,
// which returns the values as { float, float } (in XMM0) or
// { double, double } (which is returned in XMM0, XMM1).
SDLoc dl(Op);
SDValue Arg = Op.getOperand(0);
EVT ArgVT = Arg.getValueType();
Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
Entry.Node = Arg;
Entry.Ty = ArgTy;
Entry.IsSExt = false;
Entry.IsZExt = false;
Args.push_back(Entry);
bool isF64 = ArgVT == MVT::f64;
// Only optimize x86_64 for now. i386 is a bit messy. For f32,
// the small struct {f32, f32} is returned in (eax, edx). For f64,
// the results are returned via SRet in memory.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
const char *LibcallName = TLI.getLibcallName(LC);
SDValue Callee =
DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
: (Type *)VectorType::get(ArgTy, 4);
TargetLowering::CallLoweringInfo CLI(DAG);
CLI.setDebugLoc(dl)
.setChain(DAG.getEntryNode())
.setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
if (isF64)
// Returned in xmm0 and xmm1.
return CallResult.first;
// Returned in bits 0:31 and 32:64 xmm0.
SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
CallResult.first, DAG.getIntPtrConstant(0, dl));
SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
CallResult.first, DAG.getIntPtrConstant(1, dl));
SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
}
/// Widen a vector input to a vector of NVT. The
/// input vector must have the same element type as NVT.
static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
bool FillWithZeroes = false) {
// Check if InOp already has the right width.
MVT InVT = InOp.getSimpleValueType();
if (InVT == NVT)
return InOp;
if (InOp.isUndef())
return DAG.getUNDEF(NVT);
assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
"input and widen element type must match");
unsigned InNumElts = InVT.getVectorNumElements();
unsigned WidenNumElts = NVT.getVectorNumElements();
assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
"Unexpected request for vector widening");
SDLoc dl(InOp);
if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
InOp.getNumOperands() == 2) {
SDValue N1 = InOp.getOperand(1);
if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
N1.isUndef()) {
InOp = InOp.getOperand(0);
InVT = InOp.getSimpleValueType();
InNumElts = InVT.getVectorNumElements();
}
}
if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
SmallVector<SDValue, 16> Ops;
for (unsigned i = 0; i < InNumElts; ++i)
Ops.push_back(InOp.getOperand(i));
EVT EltVT = InOp.getOperand(0).getValueType();
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
DAG.getUNDEF(EltVT);
for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
Ops.push_back(FillVal);
return DAG.getBuildVector(NVT, dl, Ops);
}
SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
DAG.getUNDEF(NVT);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
InOp, DAG.getIntPtrConstant(0, dl));
}
static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX512() &&
"MGATHER/MSCATTER are supported on AVX-512 arch only");
MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
SDValue Src = N->getValue();
MVT VT = Src.getSimpleValueType();
assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
SDLoc dl(Op);
SDValue Scale = N->getScale();
SDValue Index = N->getIndex();
SDValue Mask = N->getMask();
SDValue Chain = N->getChain();
SDValue BasePtr = N->getBasePtr();
if (VT == MVT::v2f32) {
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
// If the index is v2i64 and we have VLX we can use xmm for data and index.
if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
DAG.getUNDEF(MVT::v2f32));
SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
return SDValue(NewScatter.getNode(), 1);
}
return SDValue();
}
if (VT == MVT::v2i32) {
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
DAG.getUNDEF(MVT::v2i32));
// If the index is v2i64 and we have VLX we can use xmm for data and index.
if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
return SDValue(NewScatter.getNode(), 1);
}
// Custom widen all the operands to avoid promotion.
EVT NewIndexVT = EVT::getVectorVT(
*DAG.getContext(), Index.getValueType().getVectorElementType(), 4);
Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
DAG.getUNDEF(Index.getValueType()));
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getConstant(0, dl, MVT::v2i1));
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), N->getMemoryVT(), dl,
Ops, N->getMemOperand());
}
MVT IndexVT = Index.getSimpleValueType();
MVT MaskVT = Mask.getSimpleValueType();
// If the index is v2i32, we're being called by type legalization and we
// should just let the default handling take care of it.
if (IndexVT == MVT::v2i32)
return SDValue();
// If we don't have VLX and neither the passthru nor the index is 512 bits,
// we need to widen until one is.
if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
!Index.getSimpleValueType().is512BitVector()) {
// Determine how much we need to widen by to get a 512-bit type.
unsigned Factor = std::min(512/VT.getSizeInBits(),
512/IndexVT.getSizeInBits());
unsigned NumElts = VT.getVectorNumElements() * Factor;
VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
Src = ExtendToType(Src, VT, DAG);
Index = ExtendToType(Index, IndexVT, DAG);
Mask = ExtendToType(Mask, MaskVT, DAG, true);
}
SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
return SDValue(NewScatter.getNode(), 1);
}
static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
MVT VT = Op.getSimpleValueType();
MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
"Expanding masked load is supported on AVX-512 target only!");
assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
"Expanding masked load is supported for 32 and 64-bit types only!");
assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
"Cannot lower masked load op.");
assert((ScalarVT.getSizeInBits() >= 32 ||
(Subtarget.hasBWI() &&
(ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
"Unsupported masked load op.");
// This operation is legal for targets with VLX, but without
// VLX the vector should be widened to 512 bits.
unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
SDValue Src0 = N->getSrc0();
Src0 = ExtendToType(Src0, WideDataVT, DAG);
// Mask element has to be i1.
assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
"Unexpected mask type");
MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
N->getBasePtr(), Mask, Src0,
N->getMemoryVT(), N->getMemOperand(),
N->getExtensionType(),
N->isExpandingLoad());
SDValue Exract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
NewLoad.getValue(0),
DAG.getIntPtrConstant(0, dl));
SDValue RetOps[] = {Exract, NewLoad.getValue(1)};
return DAG.getMergeValues(RetOps, dl);
}
static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
SDValue DataToStore = N->getValue();
MVT VT = DataToStore.getSimpleValueType();
MVT ScalarVT = VT.getScalarType();
SDValue Mask = N->getMask();
SDLoc dl(Op);
assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
"Compressing masked store is supported on AVX-512 targets only!");
assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
"Compressing masked store is supported for 32 and 64-bit types only!");
assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
"Cannot lower masked store op.");
assert((ScalarVT.getSizeInBits() >= 32 ||
(Subtarget.hasBWI() &&
(ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
"Unsupported masked store op.");
// This operation is legal for targets with VLX, but without
// VLX the vector should be widened to 512 bits.
unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
// Mask element has to be i1.
assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
"Unexpected mask type");
MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
Mask, N->getMemoryVT(), N->getMemOperand(),
N->isTruncatingStore(), N->isCompressingStore());
}
static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
assert(Subtarget.hasAVX2() &&
"MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
SDLoc dl(Op);
MVT VT = Op.getSimpleValueType();
SDValue Index = N->getIndex();
SDValue Mask = N->getMask();
SDValue Src0 = N->getValue();
MVT IndexVT = Index.getSimpleValueType();
MVT MaskVT = Mask.getSimpleValueType();
assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
// If the index is v2i32, we're being called by type legalization.
if (IndexVT == MVT::v2i32)
return SDValue();
// If we don't have VLX and neither the passthru nor the index is 512 bits,
// we need to widen until one is.
MVT OrigVT = VT;
if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
!IndexVT.is512BitVector()) {
// Determine how much we need to widen by to get a 512-bit type.
unsigned Factor = std::min(512/VT.getSizeInBits(),
512/IndexVT.getSizeInBits());
unsigned NumElts = VT.getVectorNumElements() * Factor;
VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
Src0 = ExtendToType(Src0, VT, DAG);
Index = ExtendToType(Index, IndexVT, DAG);
Mask = ExtendToType(Mask, MaskVT, DAG, true);
}
SDValue Ops[] = { N->getChain(), Src0, Mask, N->getBasePtr(), Index,
N->getScale() };
SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
N->getMemOperand());
SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
NewGather, DAG.getIntPtrConstant(0, dl));
return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
}
SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
SelectionDAG &DAG) const {
// TODO: Eventually, the lowering of these nodes should be informed by or
// deferred to the GC strategy for the function in which they appear. For
// now, however, they must be lowered to something. Since they are logically
// no-ops in the case of a null GC strategy (or a GC strategy which does not
// require special handling for these nodes), lower them as literal NOOPs for
// the time being.
SmallVector<SDValue, 2> Ops;
Ops.push_back(Op.getOperand(0));
if (Op->getGluedNode())
Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
SDLoc OpDL(Op);
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
return NOOP;
}
SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
SelectionDAG &DAG) const {
// TODO: Eventually, the lowering of these nodes should be informed by or
// deferred to the GC strategy for the function in which they appear. For
// now, however, they must be lowered to something. Since they are logically
// no-ops in the case of a null GC strategy (or a GC strategy which does not
// require special handling for these nodes), lower them as literal NOOPs for
// the time being.
SmallVector<SDValue, 2> Ops;
Ops.push_back(Op.getOperand(0));
if (Op->getGluedNode())
Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
SDLoc OpDL(Op);
SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
return NOOP;
}
/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: llvm_unreachable("Should not custom lower this!");
case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
return LowerCMP_SWAP(Op, Subtarget, DAG);
case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
case ISD::VSELECT: return LowerVSELECT(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
case ISD::SHL_PARTS:
case ISD::SRA_PARTS:
case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
case ISD::ZERO_EXTEND_VECTOR_INREG:
case ISD::SIGN_EXTEND_VECTOR_INREG:
return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
case ISD::FABS:
case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
case ISD::SETCC: return LowerSETCC(Op, DAG);
case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::VASTART: return LowerVASTART(Op, DAG);
case ISD::VAARG: return LowerVAARG(Op, DAG);
case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
case ISD::FRAME_TO_ARGS_OFFSET:
return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
case ISD::EH_SJLJ_SETUP_DISPATCH:
return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
case ISD::CTLZ:
case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
case ISD::CTTZ:
case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, DAG);
case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
case ISD::MULHS:
case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
case ISD::UMUL_LOHI:
case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
case ISD::ROTL:
case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
case ISD::SRA:
case ISD::SRL:
case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
case ISD::SADDO:
case ISD::UADDO:
case ISD::SSUBO:
case ISD::USUBO:
case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
case ISD::ADDCARRY:
case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
case ISD::ADD:
case ISD::SUB: return LowerADD_SUB(Op, DAG);
case ISD::SMAX:
case ISD::SMIN:
case ISD::UMAX:
case ISD::UMIN: return LowerMINMAX(Op, DAG);
case ISD::ABS: return LowerABS(Op, DAG);
case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
case ISD::GC_TRANSITION_START:
return LowerGC_TRANSITION_START(Op, DAG);
case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
}
}
/// Places new result values for the node in Results (their number
/// and types must exactly match those of the original return values of
/// the node), or leaves Results empty, which indicates that the node is not
/// to be custom lowered after all.
void X86TargetLowering::LowerOperationWrapper(SDNode *N,
SmallVectorImpl<SDValue> &Results,
SelectionDAG &DAG) const {
SDValue Res = LowerOperation(SDValue(N, 0), DAG);
if (!Res.getNode())
return;
assert((N->getNumValues() <= Res->getNumValues()) &&
"Lowering returned the wrong number of results!");
// Place new result values based on N's result numbers. In some cases
// (LowerSINT_TO_FP, for example) Res has more result values than the
// original node, and the chain (the last value) should be dropped.
for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
Results.push_back(Res.getValue(I));
}
/// Replace a node with an illegal result type with a new node built out of
/// custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const {
SDLoc dl(N);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
switch (N->getOpcode()) {
default:
llvm_unreachable("Do not know how to custom type legalize this operation!");
case X86ISD::AVG: {
// Legalize types for X86ISD::AVG by expanding vectors.
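// E.g. (illustrative) a v4i8 AVG is concatenated with undef up to v16i8
// (NumConcat = 128/32 = 4), averaged at the legal width, and the low v4i8
// subvector is extracted back out if the type action isn't widening.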
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
auto InVT = N->getValueType(0);
assert(InVT.getSizeInBits() < 128);
assert(128 % InVT.getSizeInBits() == 0);
unsigned NumConcat = 128 / InVT.getSizeInBits();
EVT RegVT = EVT::getVectorVT(*DAG.getContext(),
InVT.getVectorElementType(),
NumConcat * InVT.getVectorNumElements());
SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
Ops[0] = N->getOperand(0);
SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
Ops[0] = N->getOperand(1);
SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Ops);
SDValue Res = DAG.getNode(X86ISD::AVG, dl, RegVT, InVec0, InVec1);
if (getTypeAction(*DAG.getContext(), InVT) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, InVT, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
return;
}
case ISD::SETCC: {
// Widen v2i32 (setcc v2f32). This is really needed for AVX512VL when the
// SETCC result type is v2i1, because type legalization will end up with
// a v4i1 setcc plus an extend.
assert(N->getValueType(0) == MVT::v2i32 && "Unexpected type");
if (N->getOperand(0).getValueType() != MVT::v2f32)
return;
SDValue UNDEF = DAG.getUNDEF(MVT::v2f32);
SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(0), UNDEF);
SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(1), UNDEF);
SDValue Res = DAG.getNode(ISD::SETCC, dl, MVT::v4i32, LHS, RHS,
N->getOperand(2));
if (getTypeAction(*DAG.getContext(), MVT::v2i32) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
return;
}
// We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
case X86ISD::FMINC:
case X86ISD::FMIN:
case X86ISD::FMAXC:
case X86ISD::FMAX: {
EVT VT = N->getValueType(0);
assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
SDValue UNDEF = DAG.getUNDEF(VT);
SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(0), UNDEF);
SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
N->getOperand(1), UNDEF);
Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
return;
}
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
case ISD::UREM:
case ISD::SDIVREM:
case ISD::UDIVREM: {
SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
Results.push_back(V);
return;
}
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT: {
bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
EVT VT = N->getValueType(0);
SDValue Src = N->getOperand(0);
EVT SrcVT = Src.getValueType();
if (VT == MVT::v2i32) {
assert((IsSigned || Subtarget.hasAVX512()) &&
"Can only handle signed conversion without AVX512");
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
if (Src.getValueType() == MVT::v2f64) {
MVT ResVT = MVT::v4i32;
unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
if (!IsSigned && !Subtarget.hasVLX()) {
// Widen to 512 bits.
ResVT = MVT::v8i32;
Opc = ISD::FP_TO_UINT;
Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
DAG.getUNDEF(MVT::v8f64),
Src, DAG.getIntPtrConstant(0, dl));
}
SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
bool WidenType = getTypeAction(*DAG.getContext(),
MVT::v2i32) == TypeWidenVector;
ResVT = WidenType ? MVT::v4i32 : MVT::v2i32;
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
return;
}
if (SrcVT == MVT::v2f32) {
SDValue Idx = DAG.getIntPtrConstant(0, dl);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
DAG.getUNDEF(MVT::v2f32));
Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT
: ISD::FP_TO_UINT, dl, MVT::v4i32, Res);
if (getTypeAction(*DAG.getContext(), MVT::v2i32) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, Idx);
Results.push_back(Res);
return;
}
// The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
// so early out here.
return;
}
if (Subtarget.hasDQI() && VT == MVT::i64 &&
(SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
assert(!Subtarget.is64Bit() && "i64 should be legal");
unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
// Using a 256-bit input here to guarantee 128-bit input for f32 case.
// TODO: Use 128-bit vectors for f64 case?
// TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);
SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
DAG.getConstantFP(0.0, dl, VecInVT), Src,
ZeroIdx);
Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
Results.push_back(Res);
return;
}
std::pair<SDValue,SDValue> Vals =
FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
SDValue FIST = Vals.first, StackSlot = Vals.second;
if (FIST.getNode()) {
// Return a load from the stack slot.
if (StackSlot.getNode())
Results.push_back(
DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo()));
else
Results.push_back(FIST);
}
return;
}
case ISD::SINT_TO_FP: {
assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
SDValue Src = N->getOperand(0);
if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
return;
Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
return;
}
case ISD::UINT_TO_FP: {
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT VT = N->getValueType(0);
if (VT != MVT::v2f32)
return;
SDValue Src = N->getOperand(0);
EVT SrcVT = Src.getValueType();
if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
return;
}
if (SrcVT != MVT::v2i32)
return;
SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
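// Standard double-precision trick: 0x4330000000000000 is 2^52 as a double.
// OR-ing a zero-extended 32-bit value into its mantissa yields the double
// 2^52 + x exactly, so subtracting 2^52 recovers x as a double.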
SDValue VBias =
DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
DAG.getBitcast(MVT::v2i64, VBias));
Or = DAG.getBitcast(MVT::v2f64, Or);
// TODO: Are there any fast-math-flags to propagate here?
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
return;
}
case ISD::FP_ROUND: {
if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
return;
SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
Results.push_back(V);
return;
}
case ISD::FP_EXTEND: {
// Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
// No other ValueType for FP_EXTEND should reach this point.
assert(N->getValueType(0) == MVT::v2f32 &&
"Do not know how to legalize this Node");
return;
}
case ISD::INTRINSIC_W_CHAIN: {
unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
switch (IntNo) {
default : llvm_unreachable("Do not know how to custom type "
"legalize this intrinsic operation!");
case Intrinsic::x86_rdtsc:
return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
Results);
case Intrinsic::x86_rdtscp:
return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
Results);
case Intrinsic::x86_rdpmc:
return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
case Intrinsic::x86_xgetbv:
return getExtendedControlRegister(N, dl, DAG, Subtarget, Results);
}
}
case ISD::INTRINSIC_WO_CHAIN: {
if (SDValue V = LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), DAG))
Results.push_back(V);
return;
}
case ISD::READCYCLECOUNTER: {
return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
Results);
}
case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
EVT T = N->getValueType(0);
assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
bool Regs64bit = T == MVT::i128;
MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
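// CMPXCHG8B/16B implicitly use a fixed register layout: the expected value
// goes in EDX:EAX (RDX:RAX for the 16-byte form) and the replacement in
// ECX:EBX (RCX:RBX), which is why the operand halves are split and copied
// into those physical registers below.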
SDValue cpInL, cpInH;
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
DAG.getConstant(0, dl, HalfT));
cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
DAG.getConstant(1, dl, HalfT));
cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
cpInL, SDValue());
cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
Regs64bit ? X86::RDX : X86::EDX,
cpInH, cpInL.getValue(1));
SDValue swapInL, swapInH;
swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
DAG.getConstant(0, dl, HalfT));
swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
DAG.getConstant(1, dl, HalfT));
swapInH =
DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
swapInH, cpInH.getValue(1));
// If the current function needs the base pointer, RBX, we shouldn't use
// cmpxchg directly: lowering that instruction clobbers RBX, and since RBX
// is then a reserved register, the register allocator will not ensure its
// value is properly saved and restored around this live range.
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
SDValue Result;
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
unsigned BasePtr = TRI->getBaseRegister();
MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
(BasePtr == X86::RBX || BasePtr == X86::EBX)) {
// ISel prefers the LCMPXCHG64 variant.
// If the assert below breaks, that is no longer the case, and we need to
// teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX, not just EBX. This is a
// matter of accepting i64 input for that pseudo and restoring into a
// register of the right width in the expand pseudo. Everything else
// should just work.
assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
"Saving only half of the RBX");
unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
: X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
Regs64bit ? X86::RBX : X86::EBX,
HalfT, swapInH.getValue(1));
SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
RBXSave,
/*Glue*/ RBXSave.getValue(2)};
Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
} else {
unsigned Opcode =
Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
Regs64bit ? X86::RBX : X86::EBX, swapInL,
swapInH.getValue(1));
SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
swapInL.getValue(1)};
Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
}
SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
Regs64bit ? X86::RAX : X86::EAX,
HalfT, Result.getValue(1));
SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
Regs64bit ? X86::RDX : X86::EDX,
HalfT, cpOutL.getValue(2));
SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
MVT::i32, cpOutH.getValue(2));
SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
Results.push_back(Success);
Results.push_back(EFLAGS.getValue(1));
return;
}
case ISD::ATOMIC_SWAP:
case ISD::ATOMIC_LOAD_ADD:
case ISD::ATOMIC_LOAD_SUB:
case ISD::ATOMIC_LOAD_AND:
case ISD::ATOMIC_LOAD_OR:
case ISD::ATOMIC_LOAD_XOR:
case ISD::ATOMIC_LOAD_NAND:
case ISD::ATOMIC_LOAD_MIN:
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
case ISD::ATOMIC_LOAD: {
// Delegate to generic TypeLegalization. Situations we can really handle
// should have already been dealt with by AtomicExpandPass.cpp.
break;
}
case ISD::BITCAST: {
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT DstVT = N->getValueType(0);
EVT SrcVT = N->getOperand(0).getValueType();
// If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit
// target, we can split using the k-register rather than memory.
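// E.g. (illustrative) v64i1 -> (v32i1, v32i1) -> (i32, i32) -> BUILD_PAIR
// i64, avoiding a round trip through the stack.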
if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
Lo = DAG.getBitcast(MVT::i32, Lo);
Hi = DAG.getBitcast(MVT::i32, Hi);
SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
Results.push_back(Res);
return;
}
// Custom splitting for BWI types when AVX512F is available but BWI isn't.
if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
SrcVT.isVector() && isTypeLegal(SrcVT)) {
SDValue Lo, Hi;
std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
Lo = DAG.getBitcast(CastVT, Lo);
Hi = DAG.getBitcast(CastVT, Hi);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
Results.push_back(Res);
return;
}
if (SrcVT != MVT::f64 ||
(DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
return;
unsigned NumElts = DstVT.getVectorNumElements();
EVT SVT = DstVT.getVectorElementType();
EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
MVT::v2f64, N->getOperand(0));
SDValue ToVecInt = DAG.getBitcast(WiderVT, Expanded);
if (getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector) {
// If we are legalizing vectors by widening, we already have the desired
// legal vector type, just return it.
Results.push_back(ToVecInt);
return;
}
SmallVector<SDValue, 8> Elts;
for (unsigned i = 0, e = NumElts; i != e; ++i)
Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
ToVecInt, DAG.getIntPtrConstant(i, dl)));
Results.push_back(DAG.getBuildVector(DstVT, dl, Elts));
return;
}
case ISD::MGATHER: {
EVT VT = N->getValueType(0);
if (VT == MVT::v2f32 && (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
auto *Gather = cast<MaskedGatherSDNode>(N);
SDValue Index = Gather->getIndex();
if (Index.getValueType() != MVT::v2i64)
return;
SDValue Mask = Gather->getMask();
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
SDValue Src0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
Gather->getValue(),
DAG.getUNDEF(MVT::v2f32));
if (!Subtarget.hasVLX()) {
// We need to widen the mask, but the instruction only uses 2 of its
// elements, so we can use undef for the rest.
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getUNDEF(MVT::v2i1));
Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
}
SDValue Ops[] = { Gather->getChain(), Src0, Mask, Gather->getBasePtr(),
Index, Gather->getScale() };
SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
DAG.getVTList(MVT::v4f32, Mask.getValueType(), MVT::Other), Ops, dl,
Gather->getMemoryVT(), Gather->getMemOperand());
Results.push_back(Res);
Results.push_back(Res.getValue(2));
return;
}
if (VT == MVT::v2i32) {
auto *Gather = cast<MaskedGatherSDNode>(N);
SDValue Index = Gather->getIndex();
SDValue Mask = Gather->getMask();
assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
SDValue Src0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32,
Gather->getValue(),
DAG.getUNDEF(MVT::v2i32));
// If the index is v2i64 we can use it directly.
if (Index.getValueType() == MVT::v2i64 &&
(Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
if (!Subtarget.hasVLX()) {
// We need to widen the mask, but the instruction only uses 2 of its
// elements, so we can use undef for the rest.
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getUNDEF(MVT::v2i1));
Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
}
SDValue Ops[] = { Gather->getChain(), Src0, Mask, Gather->getBasePtr(),
Index, Gather->getScale() };
SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
DAG.getVTList(MVT::v4i32, Mask.getValueType(), MVT::Other), Ops, dl,
Gather->getMemoryVT(), Gather->getMemOperand());
SDValue Chain = Res.getValue(2);
if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
Results.push_back(Chain);
return;
}
EVT IndexVT = Index.getValueType();
EVT NewIndexVT = EVT::getVectorVT(*DAG.getContext(),
IndexVT.getScalarType(), 4);
// Otherwise we need to custom widen everything to avoid promotion.
Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index,
DAG.getUNDEF(IndexVT));
Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
DAG.getConstant(0, dl, MVT::v2i1));
SDValue Ops[] = { Gather->getChain(), Src0, Mask, Gather->getBasePtr(),
Index, Gather->getScale() };
SDValue Res = DAG.getMaskedGather(DAG.getVTList(MVT::v4i32, MVT::Other),
Gather->getMemoryVT(), dl, Ops,
Gather->getMemOperand());
SDValue Chain = Res.getValue(1);
if (getTypeAction(*DAG.getContext(), MVT::v2i32) != TypeWidenVector)
Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
DAG.getIntPtrConstant(0, dl));
Results.push_back(Res);
Results.push_back(Chain);
return;
}
break;
}
}
}
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
switch ((X86ISD::NodeType)Opcode) {
case X86ISD::FIRST_NUMBER: break;
case X86ISD::BSF: return "X86ISD::BSF";
case X86ISD::BSR: return "X86ISD::BSR";
case X86ISD::SHLD: return "X86ISD::SHLD";
case X86ISD::SHRD: return "X86ISD::SHRD";
case X86ISD::FAND: return "X86ISD::FAND";
case X86ISD::FANDN: return "X86ISD::FANDN";
case X86ISD::FOR: return "X86ISD::FOR";
case X86ISD::FXOR: return "X86ISD::FXOR";
case X86ISD::FILD: return "X86ISD::FILD";
case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
case X86ISD::FLD: return "X86ISD::FLD";
case X86ISD::FST: return "X86ISD::FST";
case X86ISD::CALL: return "X86ISD::CALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
case X86ISD::BT: return "X86ISD::BT";
case X86ISD::CMP: return "X86ISD::CMP";
case X86ISD::COMI: return "X86ISD::COMI";
case X86ISD::UCOMI: return "X86ISD::UCOMI";
case X86ISD::CMPM: return "X86ISD::CMPM";
case X86ISD::CMPM_RND: return "X86ISD::CMPM_RND";
case X86ISD::SETCC: return "X86ISD::SETCC";
case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
case X86ISD::FSETCC: return "X86ISD::FSETCC";
case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
case X86ISD::FSETCCM_RND: return "X86ISD::FSETCCM_RND";
case X86ISD::CMOV: return "X86ISD::CMOV";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
case X86ISD::IRET: return "X86ISD::IRET";
case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
case X86ISD::Wrapper: return "X86ISD::Wrapper";
case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
case X86ISD::PINSRB: return "X86ISD::PINSRB";
case X86ISD::PINSRW: return "X86ISD::PINSRW";
case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
case X86ISD::ANDNP: return "X86ISD::ANDNP";
case X86ISD::BLENDI: return "X86ISD::BLENDI";
case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
case X86ISD::ADDUS: return "X86ISD::ADDUS";
case X86ISD::SUBUS: return "X86ISD::SUBUS";
case X86ISD::HADD: return "X86ISD::HADD";
case X86ISD::HSUB: return "X86ISD::HSUB";
case X86ISD::FHADD: return "X86ISD::FHADD";
case X86ISD::FHSUB: return "X86ISD::FHSUB";
case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
case X86ISD::FMAX: return "X86ISD::FMAX";
case X86ISD::FMAXS: return "X86ISD::FMAXS";
case X86ISD::FMAX_RND: return "X86ISD::FMAX_RND";
case X86ISD::FMAXS_RND: return "X86ISD::FMAXS_RND";
case X86ISD::FMIN: return "X86ISD::FMIN";
case X86ISD::FMINS: return "X86ISD::FMINS";
case X86ISD::FMIN_RND: return "X86ISD::FMIN_RND";
case X86ISD::FMINS_RND: return "X86ISD::FMINS_RND";
case X86ISD::FMAXC: return "X86ISD::FMAXC";
case X86ISD::FMINC: return "X86ISD::FMINC";
case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
case X86ISD::FRCP: return "X86ISD::FRCP";
case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
case X86ISD::EH_SJLJ_SETUP_DISPATCH:
return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
case X86ISD::LADD: return "X86ISD::LADD";
case X86ISD::LSUB: return "X86ISD::LSUB";
case X86ISD::LOR: return "X86ISD::LOR";
case X86ISD::LXOR: return "X86ISD::LXOR";
case X86ISD::LAND: return "X86ISD::LAND";
case X86ISD::LINC: return "X86ISD::LINC";
case X86ISD::LDEC: return "X86ISD::LDEC";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
case X86ISD::VZEXT: return "X86ISD::VZEXT";
case X86ISD::VSEXT: return "X86ISD::VSEXT";
case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
case X86ISD::VFPEXT_RND: return "X86ISD::VFPEXT_RND";
case X86ISD::VFPEXTS_RND: return "X86ISD::VFPEXTS_RND";
case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
case X86ISD::VSHL: return "X86ISD::VSHL";
case X86ISD::VSRL: return "X86ISD::VSRL";
case X86ISD::VSRA: return "X86ISD::VSRA";
case X86ISD::VSHLI: return "X86ISD::VSHLI";
case X86ISD::VSRLI: return "X86ISD::VSRLI";
case X86ISD::VSRAI: return "X86ISD::VSRAI";
case X86ISD::VSRAV: return "X86ISD::VSRAV";
case X86ISD::VROTLI: return "X86ISD::VROTLI";
case X86ISD::VROTRI: return "X86ISD::VROTRI";
case X86ISD::VPPERM: return "X86ISD::VPPERM";
case X86ISD::CMPP: return "X86ISD::CMPP";
case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS";
case X86ISD::ADD: return "X86ISD::ADD";
case X86ISD::SUB: return "X86ISD::SUB";
case X86ISD::ADC: return "X86ISD::ADC";
case X86ISD::SBB: return "X86ISD::SBB";
case X86ISD::SMUL: return "X86ISD::SMUL";
case X86ISD::UMUL: return "X86ISD::UMUL";
case X86ISD::SMUL8: return "X86ISD::SMUL8";
case X86ISD::UMUL8: return "X86ISD::UMUL8";
case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
case X86ISD::INC: return "X86ISD::INC";
case X86ISD::DEC: return "X86ISD::DEC";
case X86ISD::OR: return "X86ISD::OR";
case X86ISD::XOR: return "X86ISD::XOR";
case X86ISD::AND: return "X86ISD::AND";
case X86ISD::BEXTR: return "X86ISD::BEXTR";
case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
case X86ISD::PTEST: return "X86ISD::PTEST";
case X86ISD::TESTP: return "X86ISD::TESTP";
case X86ISD::KORTEST: return "X86ISD::KORTEST";
case X86ISD::KTEST: return "X86ISD::KTEST";
case X86ISD::KADD: return "X86ISD::KADD";
case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
case X86ISD::PACKSS: return "X86ISD::PACKSS";
case X86ISD::PACKUS: return "X86ISD::PACKUS";
case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
case X86ISD::VALIGN: return "X86ISD::VALIGN";
case X86ISD::VSHLD: return "X86ISD::VSHLD";
case X86ISD::VSHRD: return "X86ISD::VSHRD";
case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
case X86ISD::SHUFP: return "X86ISD::SHUFP";
case X86ISD::SHUF128: return "X86ISD::SHUF128";
case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
case X86ISD::MOVSD: return "X86ISD::MOVSD";
case X86ISD::MOVSS: return "X86ISD::MOVSS";
case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
case X86ISD::VPERMV: return "X86ISD::VPERMV";
case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
case X86ISD::VPERMI: return "X86ISD::VPERMI";
case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
case X86ISD::VRANGE: return "X86ISD::VRANGE";
case X86ISD::VRANGE_RND: return "X86ISD::VRANGE_RND";
case X86ISD::VRANGES: return "X86ISD::VRANGES";
case X86ISD::VRANGES_RND: return "X86ISD::VRANGES_RND";
case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
case X86ISD::PSADBW: return "X86ISD::PSADBW";
case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
case X86ISD::MFENCE: return "X86ISD::MFENCE";
case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
case X86ISD::SAHF: return "X86ISD::SAHF";
case X86ISD::RDRAND: return "X86ISD::RDRAND";
case X86ISD::RDSEED: return "X86ISD::RDSEED";
case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
case X86ISD::VPSHA: return "X86ISD::VPSHA";
case X86ISD::VPSHL: return "X86ISD::VPSHL";
case X86ISD::VPCOM: return "X86ISD::VPCOM";
case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
case X86ISD::FMSUB: return "X86ISD::FMSUB";
case X86ISD::FNMADD: return "X86ISD::FNMADD";
case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
case X86ISD::VRNDSCALE_RND: return "X86ISD::VRNDSCALE_RND";
case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
case X86ISD::VRNDSCALES_RND: return "X86ISD::VRNDSCALES_RND";
case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
case X86ISD::VREDUCE_RND: return "X86ISD::VREDUCE_RND";
case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
case X86ISD::VREDUCES_RND: return "X86ISD::VREDUCES_RND";
case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
case X86ISD::VGETMANT_RND: return "X86ISD::VGETMANT_RND";
case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
case X86ISD::VGETMANTS_RND: return "X86ISD::VGETMANTS_RND";
case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR";
case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR";
case X86ISD::XTEST: return "X86ISD::XTEST";
case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
case X86ISD::EXPAND: return "X86ISD::EXPAND";
case X86ISD::SELECT: return "X86ISD::SELECT";
case X86ISD::SELECTS: return "X86ISD::SELECTS";
case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
case X86ISD::RCP14: return "X86ISD::RCP14";
case X86ISD::RCP14S: return "X86ISD::RCP14S";
case X86ISD::RCP28: return "X86ISD::RCP28";
case X86ISD::RCP28S: return "X86ISD::RCP28S";
case X86ISD::EXP2: return "X86ISD::EXP2";
case X86ISD::RSQRT14: return "X86ISD::RSQRT14";
case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S";
case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND";
case X86ISD::FGETEXPS_RND: return "X86ISD::FGETEXPS_RND";
case X86ISD::SCALEF: return "X86ISD::SCALEF";
case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
case X86ISD::ADDS: return "X86ISD::ADDS";
case X86ISD::SUBS: return "X86ISD::SUBS";
case X86ISD::AVG: return "X86ISD::AVG";
case X86ISD::MULHRS: return "X86ISD::MULHRS";
case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
case X86ISD::CVTTP2SI_RND: return "X86ISD::CVTTP2SI_RND";
case X86ISD::CVTTP2UI_RND: return "X86ISD::CVTTP2UI_RND";
case X86ISD::CVTTS2SI_RND: return "X86ISD::CVTTS2SI_RND";
case X86ISD::CVTTS2UI_RND: return "X86ISD::CVTTS2UI_RND";
case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
case X86ISD::CVTPH2PS_RND: return "X86ISD::CVTPH2PS_RND";
case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
case X86ISD::LWPINS: return "X86ISD::LWPINS";
case X86ISD::MGATHER: return "X86ISD::MGATHER";
case X86ISD::MSCATTER: return "X86ISD::MSCATTER";
case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD";
case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS";
case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD";
case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS";
case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB";
case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB";
case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB";
case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB";
case X86ISD::NT_CALL: return "X86ISD::NT_CALL";
case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND";
case X86ISD::UMWAIT: return "X86ISD::UMWAIT";
case X86ISD::TPAUSE: return "X86ISD::TPAUSE";
}
return nullptr;
}
/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS,
Instruction *I) const {
// X86 supports extremely general addressing modes.
CodeModel::Model M = getTargetMachine().getCodeModel();
// X86 allows a sign-extended 32-bit immediate field as a displacement.
if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
return false;
if (AM.BaseGV) {
unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
// If a reference to this global requires an extra load, we can't fold it.
if (isGlobalStubReference(GVFlags))
return false;
// If BaseGV requires a register for the PIC base, we cannot also have a
// BaseReg specified.
if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
return false;
// If lower 4G is not available, then we must use rip-relative addressing.
if ((M != CodeModel::Small || isPositionIndependent()) &&
Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
return false;
}
switch (AM.Scale) {
case 0:
case 1:
case 2:
case 4:
case 8:
// These scales always work.
break;
case 3:
case 5:
case 9:
// These scales are formed with basereg+scalereg. Only accept if there is
// no basereg yet.
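// E.g. (illustrative) scale 3 is encodable as "lea (%rax,%rax,2)", which
// consumes both the base and index slots, leaving no room for another base.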
if (AM.HasBaseReg)
return false;
break;
default: // Other stuff never works.
return false;
}
return true;
}
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
unsigned Bits = Ty->getScalarSizeInBits();
// 8-bit shifts are always expensive, but versions with a scalar amount aren't
// particularly cheaper than those without.
if (Bits == 8)
return false;
// XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
(Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
return false;
// AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
// shifts just as cheap as scalar ones.
if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
return false;
// AVX512BW has shifts such as vpsllvw.
if (Subtarget.hasBWI() && Bits == 16)
return false;
// Otherwise, it's significantly cheaper to shift by a scalar amount than by a
// fully general vector.
return true;
}
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
return NumBits1 > NumBits2;
}
bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
if (!isTypeLegal(EVT::getEVT(Ty1)))
return false;
assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
// Assuming the caller doesn't have a zeroext or signext return parameter,
// truncation all the way down to i1 is valid.
return true;
}
bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<32>(Imm);
}
bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
// Can also use sub to handle negated immediates.
return isInt<32>(Imm);
}
bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
if (!VT1.isInteger() || !VT2.isInteger())
return false;
unsigned NumBits1 = VT1.getSizeInBits();
unsigned NumBits2 = VT2.getSizeInBits();
return NumBits1 > NumBits2;
}
bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
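// E.g. any instruction writing a 32-bit GPR (movl, addl, ...) zeroes bits
// 63:32 of the full 64-bit register, so no explicit zext is emitted.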
return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
// x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
EVT VT1 = Val.getValueType();
if (isZExtFree(VT1, VT2))
return true;
if (Val.getOpcode() != ISD::LOAD)
return false;
if (!VT1.isSimple() || !VT1.isInteger() ||
!VT2.isSimple() || !VT2.isInteger())
return false;
switch (VT1.getSimpleVT().SimpleTy) {
default: break;
case MVT::i8:
case MVT::i16:
case MVT::i32:
// X86 has 8, 16, and 32-bit zero-extending loads.
return true;
}
return false;
}
bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
EVT SrcVT = ExtVal.getOperand(0).getValueType();
// There is no extending load for vXi1.
if (SrcVT.getScalarType() == MVT::i1)
return false;
return true;
}
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
if (!Subtarget.hasAnyFMA())
return false;
VT = VT.getScalarType();
if (!VT.isSimple())
return false;
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f32:
case MVT::f64:
return true;
default:
break;
}
return false;
}
bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
// i16 instructions are longer (0x66 prefix) and potentially slower.
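// E.g. "addw $1, %ax" carries the 0x66 operand-size prefix that the 32-bit
// form avoids, and can also incur partial-register stalls.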
return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
/// Targets can use this to indicate that they only support *some*
/// VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
if (!VT.isSimple())
return false;
// Not for i1 vectors
if (VT.getSimpleVT().getScalarType() == MVT::i1)
return false;
// Very little shuffling can be done for 64-bit vectors right now.
if (VT.getSimpleVT().getSizeInBits() == 64)
return false;
// We only care that the types being shuffled are legal. The lowering can
// handle any possible shuffle mask that results.
return isTypeLegal(VT.getSimpleVT());
}
bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
EVT VT) const {
// Don't convert an 'and' into a shuffle that we don't directly support.
// vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
if (!Subtarget.hasAVX2())
if (VT == MVT::v32i8 || VT == MVT::v16i16)
return false;
// Just delegate to the generic legality, clear masks aren't special.
return isShuffleMaskLegal(Mask, VT);
}
bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
// If the subtarget is using retpolines, we must not generate jump tables.
if (Subtarget.useRetpoline())
return false;
// Otherwise, fallback on the generic logic.
return TargetLowering::areJTsAllowed(Fn);
}
//===----------------------------------------------------------------------===//
// X86 Scheduler Hooks
//===----------------------------------------------------------------------===//
/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
const TargetInstrInfo *TII) {
DebugLoc DL = MI.getDebugLoc();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
// For the v = xbegin(), we generate
//
// thisMBB:
//  xbegin fallMBB
//
// mainMBB:
//  s0 = -1
//
// fallMBB:
//  eax = # XABORT_DEF
//  s1 = eax
//
// sinkMBB:
//  v = phi(s0/mainMBB, s1/fallMBB)
MachineBasicBlock *thisMBB = MBB;
MachineFunction *MF = MBB->getParent();
MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(I, mainMBB);
MF->insert(I, fallMBB);
MF->insert(I, sinkMBB);
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
MachineRegisterInfo &MRI = MF->getRegInfo();
unsigned DstReg = MI.getOperand(0).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
unsigned mainDstReg = MRI.createVirtualRegister(RC);
unsigned fallDstReg = MRI.createVirtualRegister(RC);
// thisMBB:
// xbegin fallMBB
// # fallthrough to mainMBB
// # abort branches to fallMBB
BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(fallMBB);
// mainMBB:
// mainDstReg := -1
BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
mainMBB->addSuccessor(sinkMBB);
// fallMBB:
// ; pseudo instruction to model hardware's definition from XABORT
// EAX := XABORT_DEF
// fallDstReg := EAX
BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
.addReg(X86::EAX);
fallMBB->addSuccessor(sinkMBB);
// sinkMBB:
// DstReg := phi(mainDstReg/mainMBB, fallDstReg/fallMBB)
BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
.addReg(mainDstReg).addMBB(mainMBB)
.addReg(fallDstReg).addMBB(fallMBB);
MI.eraseFromParent();
return sinkMBB;
}
static MachineBasicBlock *emitWRPKRU(MachineInstr &MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget) {
DebugLoc dl = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert input VAL into EAX
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX)
.addReg(MI.getOperand(0).getReg());
// insert zero to ECX
BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
// insert zero to EDX
BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::EDX);
// insert WRPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr));
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
static MachineBasicBlock *emitRDPKRU(MachineInstr &MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget) {
DebugLoc dl = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// insert zero to ECX
BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX);
// insert RDPKRU instruction
BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr));
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
.addReg(X86::EAX);
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget,
unsigned Opc) {
DebugLoc dl = MI.getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// Address into RAX/EAX, other two args into ECX, EDX.
unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
for (int i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
unsigned ValOps = X86::AddrNumOperands;
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
.addReg(MI.getOperand(ValOps).getReg());
BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
.addReg(MI.getOperand(ValOps + 1).getReg());
// The instruction itself takes no explicit operands; its inputs are the
// fixed registers set up above.
BuildMI(*BB, MI, dl, TII->get(Opc));
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
static MachineBasicBlock *emitClzero(MachineInstr *MI, MachineBasicBlock *BB,
const X86Subtarget &Subtarget) {
DebugLoc dl = MI->getDebugLoc();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
// Address into RAX/EAX
unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r;
unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
for (int i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI->getOperand(i));
// The instruction itself takes no explicit operands; its input is the
// fixed register set up above.
BuildMI(*BB, MI, dl, TII->get(X86::CLZEROr));
MI->eraseFromParent(); // The pseudo is gone now.
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const {
// Emit va_arg instruction on X86-64.
// Operands to this pseudo-instruction:
// 0 ) Output : destination address (reg)
// 1-5) Input : va_list address (addr, i64mem)
// 6 ) ArgSize : Size (in bytes) of vararg type
// 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
// 8 ) Align : Alignment of type
// 9 ) EFLAGS (implicit-def)
assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
static_assert(X86::AddrNumOperands == 5,
"VAARG_64 assumes 5 address operands");
unsigned DestReg = MI.getOperand(0).getReg();
MachineOperand &Base = MI.getOperand(1);
MachineOperand &Scale = MI.getOperand(2);
MachineOperand &Index = MI.getOperand(3);
MachineOperand &Disp = MI.getOperand(4);
MachineOperand &Segment = MI.getOperand(5);
unsigned ArgSize = MI.getOperand(6).getImm();
unsigned ArgMode = MI.getOperand(7).getImm();
unsigned Align = MI.getOperand(8).getImm();
// Memory Reference
assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
// Machine Information
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
DebugLoc DL = MI.getDebugLoc();
// struct va_list {
// i32 gp_offset
// i32 fp_offset
// i64 overflow_area (address)
// i64 reg_save_area (address)
// }
// sizeof(va_list) = 24
// alignment(va_list) = 8
unsigned TotalNumIntRegs = 6;
unsigned TotalNumXMMRegs = 8;
bool UseGPOffset = (ArgMode == 1);
bool UseFPOffset = (ArgMode == 2);
unsigned MaxOffset = TotalNumIntRegs * 8 +
(UseFPOffset ? TotalNumXMMRegs * 16 : 0);
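// Illustrative values: for GP arguments MaxOffset = 6*8 = 48; for FP
// arguments MaxOffset = 48 + 8*16 = 176, matching the reg_save_area layout
// of 6 integer registers followed by 8 XMM registers.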
// Align ArgSize to a multiple of 8.
unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
bool NeedsAlign = (Align > 8);
MachineBasicBlock *thisMBB = MBB;
MachineBasicBlock *overflowMBB;
MachineBasicBlock *offsetMBB;
MachineBasicBlock *endMBB;
unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
unsigned OffsetReg = 0;
if (!UseGPOffset && !UseFPOffset) {
// If we only pull from the overflow region, we don't create a branch.
// We don't need to alter control flow.
OffsetDestReg = 0; // unused
OverflowDestReg = DestReg;
offsetMBB = nullptr;
overflowMBB = thisMBB;
endMBB = thisMBB;
} else {
// First emit code to check if gp_offset (or fp_offset) is below the bound.
// If so, pull the argument from reg_save_area. (branch to offsetMBB)
// If not, pull from overflow_area. (branch to overflowMBB)
//
// thisMBB
// | .
// | .
// offsetMBB overflowMBB
// | .
// | .
// endMBB
// Registers for the PHI in endMBB
OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
MachineFunction *MF = MBB->getParent();
overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator MBBIter = ++MBB->getIterator();
// Insert the new basic blocks
MF->insert(MBBIter, offsetMBB);
MF->insert(MBBIter, overflowMBB);
MF->insert(MBBIter, endMBB);
// Transfer the remainder of MBB and its successor edges to endMBB.
endMBB->splice(endMBB->begin(), thisMBB,
std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
// Make offsetMBB and overflowMBB successors of thisMBB
thisMBB->addSuccessor(offsetMBB);
thisMBB->addSuccessor(overflowMBB);
// endMBB is a successor of both offsetMBB and overflowMBB
offsetMBB->addSuccessor(endMBB);
overflowMBB->addSuccessor(endMBB);
// Load the offset value into a register
OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
.setMemRefs(MMOBegin, MMOEnd);
// Check if there is enough room left to pull this argument.
BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
.addReg(OffsetReg)
.addImm(MaxOffset + 8 - ArgSizeA8);
// Branch to "overflowMBB" if offset >= max
// Fall through to "offsetMBB" otherwise
BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
.addMBB(overflowMBB);
}
// In offsetMBB, emit code to use the reg_save_area.
if (offsetMBB) {
assert(OffsetReg != 0);
// Read the reg_save_area address.
unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, 16)
.add(Segment)
.setMemRefs(MMOBegin, MMOEnd);
// Zero-extend the offset
unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
.addImm(0)
.addReg(OffsetReg)
.addImm(X86::sub_32bit);
// Add the offset to the reg_save_area to get the final address.
BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
.addReg(OffsetReg64)
.addReg(RegSaveReg);
// Compute the offset for the next argument
unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
.addReg(OffsetReg)
.addImm(UseFPOffset ? 16 : 8);
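// (In the reg_save_area each GP slot is 8 bytes and each XMM slot is 16
// bytes under the SysV x86-64 ABI, hence the 8/16 step above.)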
// Store it back into the va_list.
BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, UseFPOffset ? 4 : 0)
.add(Segment)
.addReg(NextOffsetReg)
.setMemRefs(MMOBegin, MMOEnd);
// Jump to endMBB
BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
.addMBB(endMBB);
}
//
// Emit code to use overflow area
//
// Load the overflow_area address into a register.
unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, 8)
.add(Segment)
.setMemRefs(MMOBegin, MMOEnd);
// If we need to align it, do so. Otherwise, just copy the address
// to OverflowDestReg.
if (NeedsAlign) {
// Align the overflow address
assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
// aligned_addr = (addr + (align-1)) & ~(align-1)
BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
.addReg(OverflowAddrReg)
.addImm(Align-1);
BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
.addReg(TmpReg)
.addImm(~(uint64_t)(Align-1));
} else {
BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
.addReg(OverflowAddrReg);
}
// Compute the next overflow address after this argument.
// (the overflow address should be kept 8-byte aligned)
unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
.addReg(OverflowDestReg)
.addImm(ArgSizeA8);
// Store the new overflow address.
BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
.add(Base)
.add(Scale)
.add(Index)
.addDisp(Disp, 8)
.add(Segment)
.addReg(NextAddrReg)
.setMemRefs(MMOBegin, MMOEnd);
// If we branched, emit the PHI to the front of endMBB.
if (offsetMBB) {
BuildMI(*endMBB, endMBB->begin(), DL,
TII->get(X86::PHI), DestReg)
.addReg(OffsetDestReg).addMBB(offsetMBB)
.addReg(OverflowDestReg).addMBB(overflowMBB);
}
// Erase the pseudo instruction
MI.eraseFromParent();
return endMBB;
}
MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
MachineInstr &MI, MachineBasicBlock *MBB) const {
// Emit code to save XMM registers to the stack. The ABI says that the
// number of registers to save is given in %al, so it's theoretically
// possible to use an indirect jump trick to avoid saving all of them;
// however, this code takes the simpler approach of executing all of the
// stores if %al is non-zero. It's less code, it's probably easier on the
// hardware branch predictor, and stores aren't all that expensive anyway.
// Create the new basic blocks. One block contains all the XMM stores,
// and one block is the final destination regardless of whether any
// stores were performed.
const BasicBlock *LLVM_BB = MBB->getBasicBlock();
MachineFunction *F = MBB->getParent();
MachineFunction::iterator MBBIter = ++MBB->getIterator();
MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
F->insert(MBBIter, XMMSaveMBB);
F->insert(MBBIter, EndMBB);
// Transfer the remainder of MBB and its successor edges to EndMBB.
EndMBB->splice(EndMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
// The original block will now fall through to the XMM save block.
MBB->addSuccessor(XMMSaveMBB);
// The XMMSaveMBB will fall through to the end block.
XMMSaveMBB->addSuccessor(EndMBB);
// Now add the instructions.
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
unsigned CountReg = MI.getOperand(0).getReg();
int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
// If %al is 0, branch around the XMM save block.
BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
MBB->addSuccessor(EndMBB);
}
// Make sure the last operand is EFLAGS, which gets clobbered by the branch
// that was just emitted, but clearly shouldn't be "saved".
assert((MI.getNumOperands() <= 3 ||
!MI.getOperand(MI.getNumOperands() - 1).isReg() ||
MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
"Expected last argument to be EFLAGS");
unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
// In the XMM save block, save all the XMM argument registers.
for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
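// Illustrative layout (VarArgsFPOffset is typically 48, right after the six
// 8-byte GP slots): operand i = 3 stores at FP-area offset 0, i = 4 at 16,
// and so on, one 16-byte slot per XMM register.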
MachineMemOperand *MMO = F->getMachineMemOperand(
MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
MachineMemOperand::MOStore,
/*Size=*/16, /*Align=*/16);
BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
.addFrameIndex(RegSaveFrameIndex)
.addImm(/*Scale=*/1)
.addReg(/*IndexReg=*/0)
.addImm(/*Disp=*/Offset)
.addReg(/*Segment=*/0)
.addReg(MI.getOperand(i).getReg())
.addMemOperand(MMO);
}
MI.eraseFromParent(); // The pseudo instruction is gone now.
return EndMBB;
}
// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
MachineBasicBlock* BB,
const TargetRegisterInfo* TRI) {
// Scan forward through BB for a use/def of EFLAGS.
MachineBasicBlock::iterator miI(std::next(SelectItr));
for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
const MachineInstr& mi = *miI;
if (mi.readsRegister(X86::EFLAGS))
return false;
if (mi.definesRegister(X86::EFLAGS))
break; // Should have kill-flag - update below.
}
// If we hit the end of the block, check whether EFLAGS is live into a
// successor.
if (miI == BB->end()) {
for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
sEnd = BB->succ_end();
sItr != sEnd; ++sItr) {
MachineBasicBlock* succ = *sItr;
if (succ->isLiveIn(X86::EFLAGS))
return false;
}
}
// We found a def, or hit the end of the basic block and EFLAGS wasn't live
// out. SelectMI should have a kill flag on EFLAGS.
SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
return true;
}
// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
static bool isCMOVPseudo(MachineInstr &MI) {
switch (MI.getOpcode()) {
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_GR8:
case X86::CMOV_GR16:
case X86::CMOV_GR32:
case X86::CMOV_RFP32:
case X86::CMOV_RFP64:
case X86::CMOV_RFP80:
case X86::CMOV_V2F64:
case X86::CMOV_V2I64:
case X86::CMOV_V4F32:
case X86::CMOV_V4F64:
case X86::CMOV_V4I64:
case X86::CMOV_V16F32:
case X86::CMOV_V8F32:
case X86::CMOV_V8F64:
case X86::CMOV_V8I64:
case X86::CMOV_V8I1:
case X86::CMOV_V16I1:
case X86::CMOV_V32I1:
case X86::CMOV_V64I1:
return true;
default:
return false;
}
}
// Helper function, which inserts PHI functions into SinkMBB:
// %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consequent CMOVs
// in [MIItBegin, MIItEnd) range. It returns the last MachineInstrBuilder for
// the last PHI function inserted.
static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
MachineBasicBlock *SinkMBB) {
MachineFunction *MF = TrueMBB->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
DebugLoc DL = MIItBegin->getDebugLoc();
X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
// As we are creating the PHIs, we have to be careful if there is more than
// one. Later CMOVs may reference the results of earlier CMOVs, but later
// PHIs have to reference the individual true/false inputs from earlier PHIs.
// That also means that PHI construction must work forward from earlier to
// later, and that the code must maintain a mapping from each earlier PHI's
// destination register to the registers that went into that PHI.
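// Sketch of the rewrite, not emitted code: if an earlier iteration built
//   t2 = PHI [t1, FalseMBB], [f1, TrueMBB]
// then RegRewriteTable[t2] = (t1, f1), and a later PHI that would read t2
// reads t1 on the FalseMBB edge or f1 on the TrueMBB edge instead.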
DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
MachineInstrBuilder MIB;
for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
unsigned DestReg = MIIt->getOperand(0).getReg();
unsigned Op1Reg = MIIt->getOperand(1).getReg();
unsigned Op2Reg = MIIt->getOperand(2).getReg();
// If this CMOV we are generating is the opposite condition from
// the jump we generated, then we have to swap the operands for the
// PHI that is going to be generated.
if (MIIt->getOperand(3).getImm() == OppCC)
std::swap(Op1Reg, Op2Reg);
if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
Op1Reg = RegRewriteTable[Op1Reg].first;
if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
Op2Reg = RegRewriteTable[Op2Reg].second;
MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
.addReg(Op1Reg)
.addMBB(FalseMBB)
.addReg(Op2Reg)
.addMBB(TrueMBB);
// Add this PHI to the rewrite table.
RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
}
return MIB;
}
// Lower cascaded selects of the form (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
MachineBasicBlock *
X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
MachineInstr &SecondCascadedCMOV,
MachineBasicBlock *ThisMBB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = FirstCMOV.getDebugLoc();
// We lower cascaded CMOVs such as
//
// (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
//
// to two successive branches.
//
// Without this, we would add a PHI between the two jumps, which ends up
// creating a few copies all around. For instance, for
//
// (sitofp (zext (fcmp une)))
//
// we would generate:
//
// ucomiss %xmm1, %xmm0
// movss <1.0f>, %xmm0
// movaps %xmm0, %xmm1
// jne .LBB5_2
// xorps %xmm1, %xmm1
// .LBB5_2:
// jp .LBB5_4
// movaps %xmm1, %xmm0
// .LBB5_4:
// retq
//
// because this custom-inserter would have generated:
//
//   A
//   | \
//   |  B
//   | /
//   C
//   | \
//   |  D
//   | /
//   E
//
// A: X = ...; Y = ...
// B: empty
// C: Z = PHI [X, A], [Y, B]
// D: empty
// E: PHI [X, C], [Z, D]
//
// If we lower both CMOVs in a single step, we can instead generate:
//
//   A
//   | \
//   |  C
//   | /|
//   |/ |
//   |  |
//   |  D
//   | /
//   E
//
// A: X = ...; Y = ...
// D: empty
// E: PHI [X, A], [X, C], [Y, D]
//
// Which, in our sitofp/fcmp example, gives us something like:
//
// ucomiss %xmm1, %xmm0
// movss <1.0f>, %xmm0
// jne .LBB5_4
// jp .LBB5_4
// xorps %xmm0, %xmm0
// .LBB5_4:
// retq
//
// We lower cascaded CMOV into two successive branches to the same block.
// EFLAGS is used by both, so mark it as live in the second.
const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
MachineFunction *F = ThisMBB->getParent();
MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++ThisMBB->getIterator();
F->insert(It, FirstInsertedMBB);
F->insert(It, SecondInsertedMBB);
F->insert(It, SinkMBB);
// For a cascaded CMOV, we lower it to two successive branches to
// the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
// the FirstInsertedMBB.
FirstInsertedMBB->addLiveIn(X86::EFLAGS);
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
!checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
SecondInsertedMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
}
// Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
SinkMBB->splice(SinkMBB->begin(), ThisMBB,
std::next(MachineBasicBlock::iterator(FirstCMOV)),
ThisMBB->end());
SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
// Fallthrough block for ThisMBB.
ThisMBB->addSuccessor(FirstInsertedMBB);
// The true block target of the first branch is always SinkMBB.
ThisMBB->addSuccessor(SinkMBB);
// Fallthrough block for FirstInsertedMBB.
FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
// The true block for the branch of FirstInsertedMBB.
FirstInsertedMBB->addSuccessor(SinkMBB);
// This is fallthrough.
SecondInsertedMBB->addSuccessor(SinkMBB);
// Create the conditional branch instructions.
X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
unsigned Opc = X86::GetCondBranchFromCond(FirstCC);
BuildMI(ThisMBB, DL, TII->get(Opc)).addMBB(SinkMBB);
X86::CondCode SecondCC =
X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
unsigned Opc2 = X86::GetCondBranchFromCond(SecondCC);
BuildMI(FirstInsertedMBB, DL, TII->get(Opc2)).addMBB(SinkMBB);
// SinkMBB:
// %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
unsigned DestReg = FirstCMOV.getOperand(0).getReg();
unsigned Op1Reg = FirstCMOV.getOperand(1).getReg();
unsigned Op2Reg = FirstCMOV.getOperand(2).getReg();
MachineInstrBuilder MIB =
BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
.addReg(Op1Reg)
.addMBB(SecondInsertedMBB)
.addReg(Op2Reg)
.addMBB(ThisMBB);
// FirstInsertedMBB provides the same incoming value as ThisMBB
// (the True operand of the SELECT_CC/CMOV nodes).
MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
// Copy the PHI result to the register defined by the second CMOV.
BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
TII->get(TargetOpcode::COPY),
SecondCascadedCMOV.getOperand(0).getReg())
.addReg(FirstCMOV.getOperand(0).getReg());
// Now remove the CMOVs.
FirstCMOV.eraseFromParent();
SecondCascadedCMOV.eraseFromParent();
return SinkMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
MachineBasicBlock *ThisMBB) const {
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
// true/false values to select between and a branch opcode to use.
// ThisMBB:
// ...
// TrueVal = ...
// cmpTY ccX, r1, r2
// bCC copy1MBB
// fallthrough --> FalseMBB
// This code lowers all pseudo-CMOV instructions. Generally it lowers these
// as described above, by inserting a BB, and then making a PHI at the join
// point to select the true and false operands of the CMOV in the PHI.
//
// The code also handles two different cases of multiple CMOV opcodes
// in a row.
//
// Case 1:
// In this case, there are multiple CMOVs in a row, all of which are based on
// the same condition setting (or the exact opposite condition setting).
// In this case we can lower all the CMOVs using a single inserted BB, and
// then make a number of PHIs at the join point to model the CMOVs. The only
// trickiness here is that in a case like:
//
// t2 = CMOV cond1 t1, f1
// t3 = CMOV cond1 t2, f2
//
// when rewriting this into PHIs, we have to perform some renaming on the
// temps since you cannot have a PHI operand refer to a PHI result earlier
// in the same block. The "simple" but wrong lowering would be:
//
// t2 = PHI t1(BB1), f1(BB2)
// t3 = PHI t2(BB1), f2(BB2)
//
// but clearly t2 is not defined in BB1, so that is incorrect. The proper
// renaming is to note that on the path through BB1, t2 is really just a
// copy of t1, and do that renaming, properly generating:
//
// t2 = PHI t1(BB1), f1(BB2)
// t3 = PHI t1(BB1), f2(BB2)
//
// Case 2:
// CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
// function - EmitLoweredCascadedSelect.
X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
MachineInstr *LastCMOV = &MI;
MachineBasicBlock::iterator NextMIIt =
std::next(MachineBasicBlock::iterator(MI));
// Check for case 1, where there are multiple CMOVs with the same condition
// first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
// number of jumps the most.
if (isCMOVPseudo(MI)) {
// See if we have a string of CMOVS with the same condition.
while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
(NextMIIt->getOperand(3).getImm() == CC ||
NextMIIt->getOperand(3).getImm() == OppCC)) {
LastCMOV = &*NextMIIt;
++NextMIIt;
}
}
// Check for case 2, but only if we didn't already find case 1,
// as indicated by LastCMOV still pointing at MI.
if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
NextMIIt->getOpcode() == MI.getOpcode() &&
NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
NextMIIt->getOperand(1).isKill()) {
return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
}
const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
MachineFunction *F = ThisMBB->getParent();
MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
MachineFunction::iterator It = ++ThisMBB->getIterator();
F->insert(It, FalseMBB);
F->insert(It, SinkMBB);
// If the EFLAGS register isn't dead in the terminator, then claim that it's
// live into the sink and copy blocks.
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
if (!LastCMOV->killsRegister(X86::EFLAGS) &&
!checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
FalseMBB->addLiveIn(X86::EFLAGS);
SinkMBB->addLiveIn(X86::EFLAGS);
}
// Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
SinkMBB->splice(SinkMBB->begin(), ThisMBB,
std::next(MachineBasicBlock::iterator(LastCMOV)),
ThisMBB->end());
SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
// Fallthrough block for ThisMBB.
ThisMBB->addSuccessor(FalseMBB);
// The true block target of the first (or only) branch is always SinkMBB.
ThisMBB->addSuccessor(SinkMBB);
// Fallthrough block for FalseMBB.
FalseMBB->addSuccessor(SinkMBB);
// Create the conditional branch instruction.
unsigned Opc = X86::GetCondBranchFromCond(CC);
BuildMI(ThisMBB, DL, TII->get(Opc)).addMBB(SinkMBB);
// SinkMBB:
// %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
// ...
MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
MachineBasicBlock::iterator MIItEnd =
std::next(MachineBasicBlock::iterator(LastCMOV));
createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
// Now remove the CMOV(s).
ThisMBB->erase(MIItBegin, MIItEnd);
return SinkMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI,
MachineBasicBlock *BB) const {
// Combine the following atomic floating-point modification pattern:
//   a.store(reg OP a.load(acquire), release)
// Transform it into:
//   OPss (%gpr), %xmm
//   movss %xmm, (%gpr)
// or the sd equivalent for 64-bit operations.
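// A concrete 32-bit add instance, with illustrative register names:
//   addss (%rdi), %xmm0
//   movss %xmm0, (%rdi)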
unsigned MOp, FOp;
switch (MI.getOpcode()) {
default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
case X86::RELEASE_FADD32mr:
FOp = X86::ADDSSrm;
MOp = X86::MOVSSmr;
break;
case X86::RELEASE_FADD64mr:
FOp = X86::ADDSDrm;
MOp = X86::MOVSDmr;
break;
}
const X86InstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
unsigned ValOpIdx = X86::AddrNumOperands;
unsigned VSrc = MI.getOperand(ValOpIdx).getReg();
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(FOp),
MRI.createVirtualRegister(MRI.getRegClass(VSrc)))
.addReg(VSrc);
for (int i = 0; i < X86::AddrNumOperands; ++i) {
MachineOperand &Operand = MI.getOperand(i);
// Clear any kill flags on register operands as we'll create a second
// instruction using the same address operands.
if (Operand.isReg())
Operand.setIsKill(false);
MIB.add(Operand);
}
MachineInstr *FOpMI = MIB;
MIB = BuildMI(*BB, MI, DL, TII->get(MOp));
for (int i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill);
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
const BasicBlock *LLVM_BB = BB->getBasicBlock();
assert(MF->shouldSplitStack());
const bool Is64Bit = Subtarget.is64Bit();
const bool IsLP64 = Subtarget.isTarget64BitLP64();
const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
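// Illustrative LP64 flavor of the check emitted below (register names are
// placeholders; the limit is assumed to sit in the TCB slot used by the
// split-stack runtime):
//   movq %rsp, %tmp
//   subq %size, %tmp        # candidate SP after the allocation
//   cmpq %tmp, %fs:0x70     # stack limit vs. candidate SP
//   jg   mallocMBB          # limit is higher: stacklet too small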
// BB:
// ... [Till the alloca]
// If stacklet is not large enough, jump to mallocMBB
//
// bumpMBB:
// Allocate by subtracting from RSP
// Jump to continueMBB
//
// mallocMBB:
// Allocate by call to runtime
//
// continueMBB:
// ...
// [rest of original BB]
//
MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
MachineRegisterInfo &MRI = MF->getRegInfo();
const TargetRegisterClass *AddrRegClass =
getRegClassFor(getPointerTy(MF->getDataLayout()));
unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
sizeVReg = MI.getOperand(1).getReg(),
physSPReg =
IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
MachineFunction::iterator MBBIter = ++BB->getIterator();
MF->insert(MBBIter, bumpMBB);
MF->insert(MBBIter, mallocMBB);
MF->insert(MBBIter, continueMBB);
continueMBB->splice(continueMBB->begin(), BB,
std::next(MachineBasicBlock::iterator(MI)), BB->end());
continueMBB->transferSuccessorsAndUpdatePHIs(BB);
// Add code to the main basic block to check if the stack limit has been hit,
// and if so, jump to mallocMBB otherwise to bumpMBB.
BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
.addReg(tmpSPVReg).addReg(sizeVReg);
BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
.addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
.addReg(SPLimitVReg);
BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
// bumpMBB simply decreases the stack pointer, since we know the current
// stacklet has enough space.
BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
.addReg(SPLimitVReg);
BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
.addReg(SPLimitVReg);
BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
// Calls into a routine in libgcc to allocate more space from the heap.
const uint32_t *RegMask =
Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
if (IsLP64) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
.addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
.addExternalSymbol("__morestack_allocate_stack_space")
.addRegMask(RegMask)
.addReg(X86::RDI, RegState::Implicit)
.addReg(X86::RAX, RegState::ImplicitDefine);
} else if (Is64Bit) {
BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
.addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
.addExternalSymbol("__morestack_allocate_stack_space")
.addRegMask(RegMask)
.addReg(X86::EDI, RegState::Implicit)
.addReg(X86::EAX, RegState::ImplicitDefine);
} else {
BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
.addImm(12);
BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
.addExternalSymbol("__morestack_allocate_stack_space")
.addRegMask(RegMask)
.addReg(X86::EAX, RegState::ImplicitDefine);
}
if (!Is64Bit)
BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
.addImm(16);
BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
.addReg(IsLP64 ? X86::RAX : X86::EAX);
BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
// Set up the CFG correctly.
BB->addSuccessor(bumpMBB);
BB->addSuccessor(mallocMBB);
mallocMBB->addSuccessor(continueMBB);
bumpMBB->addSuccessor(continueMBB);
// Take care of the PHI nodes.
BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
MI.getOperand(0).getReg())
.addReg(mallocPtrVReg)
.addMBB(mallocMBB)
.addReg(bumpSPPtrVReg)
.addMBB(bumpMBB);
// Delete the original pseudo instruction.
MI.eraseFromParent();
// And we're done.
return continueMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
DebugLoc DL = MI.getDebugLoc();
assert(!isAsynchronousEHPersonality(
classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
"SEH does not use catchret!");
// Only 32-bit EH needs to worry about manually restoring stack pointers.
if (!Subtarget.is32Bit())
return BB;
// C++ EH creates a new target block to hold the restore code, and wires up
// the new block to the return destination with a normal JMP_4.
MachineBasicBlock *RestoreMBB =
MF->CreateMachineBasicBlock(BB->getBasicBlock());
assert(BB->succ_size() == 1);
MF->insert(std::next(BB->getIterator()), RestoreMBB);
RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
BB->addSuccessor(RestoreMBB);
MI.getOperand(0).setMBB(RestoreMBB);
auto RestoreMBBI = RestoreMBB->begin();
BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const Constant *PerFn = MF->getFunction().getPersonalityFn();
bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
// Only 32-bit SEH requires special handling for catchpad.
if (IsSEH && Subtarget.is32Bit()) {
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
}
MI.eraseFromParent();
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
MachineBasicBlock *BB) const {
// So, here we replace TLSADDR with the sequence:
// adjust_stackdown -> TLSADDR -> adjust_stackup.
// We need this because TLSADDR is lowered into a call
// inside MC; without the two markers, shrink-wrapping
// may push the prologue/epilogue past them.
const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
MachineFunction &MF = *BB->getParent();
// Emit CALLSEQ_START right before the instruction.
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
MachineInstrBuilder CallseqStart =
BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
// Emit CALLSEQ_END right after the instruction.
// We don't call erase from parent because we want to keep the
// original instruction around.
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
MachineInstrBuilder CallseqEnd =
BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
MachineBasicBlock *BB) const {
// This is pretty easy. We're taking the value that we received from
// our load from the relocation, sticking it in either RDI (x86-64)
// or EAX and doing an indirect call. The return value will then
// be in the normal return register.
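// A minimal x86-64 sketch of the sequence emitted below (symbol name is
// illustrative):
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)            # result returned in %rax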
MachineFunction *F = BB->getParent();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
assert(MI.getOperand(3).isGlobal() && "This should be a global");
// Get a register mask for the lowered call.
// FIXME: The 32-bit calls have non-standard calling conventions. Use a
// proper register mask.
const uint32_t *RegMask =
Subtarget.is64Bit() ?
Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
if (Subtarget.is64Bit()) {
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
MI.getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
addDirectMem(MIB, X86::RDI);
MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else if (!isPositionIndependent()) {
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
.addReg(0)
.addImm(0)
.addReg(0)
.addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
MI.getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
} else {
MachineInstrBuilder MIB =
BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
.addReg(TII->getGlobalBaseReg(F))
.addImm(0)
.addReg(0)
.addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
MI.getOperand(3).getTargetFlags())
.addReg(0);
MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
addDirectMem(MIB, X86::EAX);
MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
}
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
switch (RPOpc) {
case X86::RETPOLINE_CALL32:
return X86::CALLpcrel32;
case X86::RETPOLINE_CALL64:
return X86::CALL64pcrel32;
case X86::RETPOLINE_TCRETURN32:
return X86::TCRETURNdi;
case X86::RETPOLINE_TCRETURN64:
return X86::TCRETURNdi64;
}
llvm_unreachable("not retpoline opcode");
}
static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
unsigned Reg) {
if (Subtarget.useRetpolineExternalThunk()) {
// When using an external thunk for retpolines, we pick names that match the
// names GCC happens to use as well. This helps simplify the implementation
// of the thunks for kernels where they have no easy ability to create
// aliases and are doing non-trivial configuration of the thunk's body. For
// example, the Linux kernel will do boot-time hot patching of the thunk
// bodies and cannot easily export aliases of these to loaded modules.
//
// Note that at any point in the future, we may need to change the semantics
// of how we implement retpolines and at that time will likely change the
// name of the called thunk. Essentially, there is no hard guarantee that
// LLVM will generate calls to specific thunks, we merely make a best-effort
// attempt to help out kernels and other systems where duplicating the
// thunks is costly.
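// For example (illustrative): a 64-bit retpoline call through R11 becomes
// "callq __x86_indirect_thunk_r11" with external thunks, or
// "callq __llvm_retpoline_r11" with the LLVM-emitted COMDAT thunk below.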
switch (Reg) {
case X86::EAX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_eax";
case X86::ECX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_ecx";
case X86::EDX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_edx";
case X86::EDI:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__x86_indirect_thunk_edi";
case X86::R11:
assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
return "__x86_indirect_thunk_r11";
}
llvm_unreachable("unexpected reg for retpoline");
}
// When targeting an internal COMDAT thunk, use an LLVM-specific name.
switch (Reg) {
case X86::EAX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_eax";
case X86::ECX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_ecx";
case X86::EDX:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_edx";
case X86::EDI:
assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
return "__llvm_retpoline_edi";
case X86::R11:
assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
return "__llvm_retpoline_r11";
}
llvm_unreachable("unexpected reg for retpoline");
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
MachineBasicBlock *BB) const {
// Copy the virtual register into the R11 physical register and
// call the retpoline thunk.
DebugLoc DL = MI.getDebugLoc();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
unsigned CalleeVReg = MI.getOperand(0).getReg();
unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
// Find an available scratch register to hold the callee. On 64-bit, we can
// just use R11, but we scan for uses anyway to ensure we don't generate
// incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
// already a register use operand to the call to hold the callee. If none
// are available, use EDI instead. EDI is chosen because EBX is the PIC base
// register and ESI is the base pointer to realigned stack frames with VLAs.
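// For example (illustrative): a 32-bit indirect call whose operands already
// use EAX and EDX gets ECX, the first entry left non-zero by the scan below.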
SmallVector<unsigned, 3> AvailableRegs;
if (Subtarget.is64Bit())
AvailableRegs.push_back(X86::R11);
else
AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
// Zero out any registers that are already used.
for (const auto &MO : MI.operands()) {
if (MO.isReg() && MO.isUse())
for (unsigned &Reg : AvailableRegs)
if (Reg == MO.getReg())
Reg = 0;
}
// Choose the first remaining non-zero available register.
unsigned AvailableReg = 0;
for (unsigned MaybeReg : AvailableRegs) {
if (MaybeReg) {
AvailableReg = MaybeReg;
break;
}
}
if (!AvailableReg)
report_fatal_error("calling convention incompatible with retpoline, no "
"available registers");
const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
.addReg(CalleeVReg);
MI.getOperand(0).ChangeToES(Symbol);
MI.setDesc(TII->get(Opc));
MachineInstrBuilder(*BB->getParent(), &MI)
.addReg(AvailableReg, RegState::Implicit | RegState::Kill);
return BB;
}
/// A SetJmp implies a future control-flow change when the corresponding
/// LongJmp is called.
/// Instead of using the 'return' instruction, the long jump fixes the stack
/// and performs an indirect branch; to do so it uses the registers that were
/// stored in the jump buffer when SetJmp was called.
/// When the shadow stack is enabled we need to fix it as well, because some
/// return addresses will be skipped.
/// This function saves the SSP so that emitLongJmpShadowStackFix can fix the
/// shadow stack later.
/// \sa emitLongJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
MachineInstrBuilder MIB;
// Memory Reference.
MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
// Initialize a register with zero.
MVT PVT = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
unsigned ZReg = MRI.createVirtualRegister(PtrRC);
unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
.addDef(ZReg)
.addReg(ZReg, RegState::Undef)
.addReg(ZReg, RegState::Undef);
// Read the current SSP Register value to the zeroed register.
unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
// Write the SSP register value to offset 3 in input memory buffer.
unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
const int64_t SSPOffset = 3 * PVT.getStoreSize();
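// E.g. with PVT == MVT::i64 the SSP lands at byte offset 24 of the buffer
// (offset 12 for i32), matching the "buf+24/12" slot that
// emitLongJmpShadowStackFix reloads.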
const unsigned MemOpndSlot = 1;
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
else
MIB.add(MI.getOperand(MemOpndSlot + i));
}
MIB.addReg(SSPCopyReg);
MIB.setMemRefs(MMOBegin, MMOEnd);
}
MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
const BasicBlock *BB = MBB->getBasicBlock();
MachineFunction::iterator I = ++MBB->getIterator();
// Memory Reference
MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
unsigned DstReg;
unsigned MemOpndSlot = 0;
unsigned CurOp = 0;
DstReg = MI.getOperand(CurOp++).getReg();
const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
(void)TRI;
unsigned mainDstReg = MRI.createVirtualRegister(RC);
unsigned restoreDstReg = MRI.createVirtualRegister(RC);
MemOpndSlot = CurOp;
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
// For v = setjmp(buf), we generate
//
// thisMBB:
// buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
// SjLjSetup restoreMBB
//
// mainMBB:
// v_main = 0
//
// sinkMBB:
// v = phi(main, restore)
//
// restoreMBB:
// if base pointer being used, load it from frame
// v_restore = 1
MachineBasicBlock *thisMBB = MBB;
MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(I, mainMBB);
MF->insert(I, sinkMBB);
MF->push_back(restoreMBB);
restoreMBB->setHasAddressTaken();
MachineInstrBuilder MIB;
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB,
std::next(MachineBasicBlock::iterator(MI)), MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
// thisMBB:
unsigned PtrStoreOpc = 0;
unsigned LabelReg = 0;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
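// Buffer layout used by this lowering (in pointer-sized slots): slot 0 holds
// the frame pointer, slot 1 the resume IP (LabelOffset), slot 2 the stack
// pointer, and slot 3 the SSP when shadow stacks are enabled; see
// emitEHSjLjLongJmp and emitSetJmpShadowStackFix.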
bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
!isPositionIndependent();
// Prepare IP either in reg or imm.
if (!UseImmLabel) {
PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
LabelReg = MRI.createVirtualRegister(PtrRC);
if (Subtarget.is64Bit()) {
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addMBB(restoreMBB)
.addReg(0);
} else {
const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
.addReg(XII->getGlobalBaseReg(MF))
.addImm(0)
.addReg(0)
.addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
.addReg(0);
}
} else
PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
// Store IP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
else
MIB.add(MI.getOperand(MemOpndSlot + i));
}
if (!UseImmLabel)
MIB.addReg(LabelReg);
else
MIB.addMBB(restoreMBB);
MIB.setMemRefs(MMOBegin, MMOEnd);
if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
emitSetJmpShadowStackFix(MI, thisMBB);
}
// Setup
MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
.addMBB(restoreMBB);
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
MIB.addRegMask(RegInfo->getNoPreservedMask());
thisMBB->addSuccessor(mainMBB);
thisMBB->addSuccessor(restoreMBB);
// mainMBB:
// EAX = 0
BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
mainMBB->addSuccessor(sinkMBB);
// sinkMBB:
BuildMI(*sinkMBB, sinkMBB->begin(), DL,
TII->get(X86::PHI), DstReg)
.addReg(mainDstReg).addMBB(mainMBB)
.addReg(restoreDstReg).addMBB(restoreMBB);
// restoreMBB:
if (RegInfo->hasBasePointer(*MF)) {
const bool Uses64BitFramePtr =
Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
X86FI->setRestoreBasePointer(MF);
unsigned FramePtr = RegInfo->getFrameRegister(*MF);
unsigned BasePtr = RegInfo->getBaseRegister();
unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
FramePtr, true, X86FI->getRestoreBasePointerOffset())
.setMIFlag(MachineInstr::FrameSetup);
}
BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
restoreMBB->addSuccessor(sinkMBB);
MI.eraseFromParent();
return sinkMBB;
}
/// Fix the shadow stack using the previously saved SSP pointer.
/// \sa emitSetJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
/// \return The sink MBB that will perform the future indirect branch.
MachineBasicBlock *
X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
MVT PVT = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
// checkSspMBB:
// xor vreg1, vreg1
// rdssp vreg1
// test vreg1, vreg1
// je sinkMBB # Jump if Shadow Stack is not supported
// fallMBB:
// mov buf+24/12(%rip), vreg2
// sub vreg1, vreg2
// jbe sinkMBB # No need to fix the Shadow Stack
// fixShadowMBB:
// shr 3/2, vreg2
// incssp vreg2 # fix the SSP according to the lower 8 bits
// shr 8, vreg2
// je sinkMBB
// fixShadowLoopPrepareMBB:
// shl vreg2
// mov 128, vreg3
// fixShadowLoopMBB:
// incssp vreg3
// dec vreg2
// jne fixShadowLoopMBB # Iterate until you finish fixing
// # the Shadow Stack
// sinkMBB:
MachineFunction::iterator I = ++MBB->getIterator();
const BasicBlock *BB = MBB->getBasicBlock();
MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
MF->insert(I, checkSspMBB);
MF->insert(I, fallMBB);
MF->insert(I, fixShadowMBB);
MF->insert(I, fixShadowLoopPrepareMBB);
MF->insert(I, fixShadowLoopMBB);
MF->insert(I, sinkMBB);
// Transfer the remainder of BB and its successor edges to sinkMBB.
sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
MBB->end());
sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
MBB->addSuccessor(checkSspMBB);
// Initialize a register with zero.
unsigned ZReg = MRI.createVirtualRegister(PtrRC);
unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
.addDef(ZReg)
.addReg(ZReg, RegState::Undef)
.addReg(ZReg, RegState::Undef);
// Read the current SSP Register value to the zeroed register.
unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
// Check whether the value read from the SSP register is zero; if so, the
// shadow stack is not supported, so jump directly to the sink.
unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
.addReg(SSPCopyReg)
.addReg(SSPCopyReg);
BuildMI(checkSspMBB, DL, TII->get(X86::JE_1)).addMBB(sinkMBB);
checkSspMBB->addSuccessor(sinkMBB);
checkSspMBB->addSuccessor(fallMBB);
// Reload the previously saved SSP register value.
unsigned PrevSSPReg = MRI.createVirtualRegister(PtrRC);
unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
const int64_t SSPOffset = 3 * PVT.getStoreSize();
MachineInstrBuilder MIB =
BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(i), SSPOffset);
else
MIB.add(MI.getOperand(i));
}
MIB.setMemRefs(MMOBegin, MMOEnd);
// Subtract the current SSP from the previous SSP.
unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
.addReg(PrevSSPReg)
.addReg(SSPCopyReg);
// Jump to sink in case PrevSSPReg <= SSPCopyReg.
BuildMI(fallMBB, DL, TII->get(X86::JBE_1)).addMBB(sinkMBB);
fallMBB->addSuccessor(sinkMBB);
fallMBB->addSuccessor(fixShadowMBB);
// Shift right by 2 (32-bit) or 3 (64-bit), because incssp multiplies its
// argument by 4 or 8, respectively.
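// Worked example (64-bit, values illustrative): for a delta of 0x900 bytes,
// vreg2 = 0x900 >> 3 = 0x120 slots; the first incssp consumes the low 8 bits
// (0x20 slots = 256 bytes), 0x120 >> 8 = 1 remains, the shift-left makes the
// loop counter 2, and two incssp-by-128 iterations advance the remaining
// 2 * 128 * 8 = 2048 bytes.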
unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
unsigned SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
.addReg(SspSubReg)
.addImm(Offset);
// Increase SSP when looking only on the lower 8 bits of the delta.
unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
// Reset the lower 8 bits.
unsigned SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
.addReg(SspFirstShrReg)
.addImm(8);
// Jump if the result of the shift is zero.
BuildMI(fixShadowMBB, DL, TII->get(X86::JE_1)).addMBB(sinkMBB);
fixShadowMBB->addSuccessor(sinkMBB);
fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
// Do a single shift left.
unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
unsigned SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
.addReg(SspSecondShrReg);
// Save the value 128 to a register (will be used next with incssp).
unsigned Value128InReg = MRI.createVirtualRegister(PtrRC);
unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
.addImm(128);
fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
// Since incssp only looks at the lower 8 bits, we might need to do several
// iterations of incssp until we finish fixing the shadow stack.
unsigned DecReg = MRI.createVirtualRegister(PtrRC);
unsigned CounterReg = MRI.createVirtualRegister(PtrRC);
BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
.addReg(SspAfterShlReg)
.addMBB(fixShadowLoopPrepareMBB)
.addReg(DecReg)
.addMBB(fixShadowLoopMBB);
// Every iteration we increase the SSP by 128.
BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
// Every iteration we decrement the counter by 1.
unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
// Jump if the counter is not zero yet.
BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JNE_1)).addMBB(fixShadowLoopMBB);
fixShadowLoopMBB->addSuccessor(sinkMBB);
fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
return sinkMBB;
}
MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
MachineBasicBlock *MBB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo &MRI = MF->getRegInfo();
// Memory Reference
MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) &&
"Invalid Pointer Size!");
const TargetRegisterClass *RC =
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as GPR.
const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
unsigned SP = RegInfo->getStackRegister();
MachineInstrBuilder MIB;
const int64_t LabelOffset = 1 * PVT.getStoreSize();
const int64_t SPOffset = 2 * PVT.getStoreSize();
unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
MachineBasicBlock *thisMBB = MBB;
// When CET and the shadow stack are enabled, we need to fix the Shadow Stack.
if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
}
// Reload FP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
MIB.add(MI.getOperand(i));
MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload IP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(i), LabelOffset);
else
MIB.add(MI.getOperand(i));
}
MIB.setMemRefs(MMOBegin, MMOEnd);
// Reload SP
MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
if (i == X86::AddrDisp)
MIB.addDisp(MI.getOperand(i), SPOffset);
else
MIB.add(MI.getOperand(i));
}
MIB.setMemRefs(MMOBegin, MMOEnd);
// Jump
BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
MI.eraseFromParent();
return thisMBB;
}
void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
MachineBasicBlock *MBB,
MachineBasicBlock *DispatchBB,
int FI) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = MBB->getParent();
MachineRegisterInfo *MRI = &MF->getRegInfo();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
MVT PVT = getPointerTy(MF->getDataLayout());
assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
unsigned Op = 0;
unsigned VR = 0;
bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
!isPositionIndependent();
if (UseImmLabel) {
Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
} else {
const TargetRegisterClass *TRC =
(PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
VR = MRI->createVirtualRegister(TRC);
Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
if (Subtarget.is64Bit())
BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
.addReg(X86::RIP)
.addImm(1)
.addReg(0)
.addMBB(DispatchBB)
.addReg(0);
else
BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
.addReg(0) /* TII->getGlobalBaseReg(MF) */
.addImm(1)
.addReg(0)
.addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
.addReg(0);
}
MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
if (UseImmLabel)
MIB.addMBB(DispatchBB);
else
MIB.addReg(VR);
}
MachineBasicBlock *
X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
MachineBasicBlock *BB) const {
DebugLoc DL = MI.getDebugLoc();
MachineFunction *MF = BB->getParent();
MachineFrameInfo &MFI = MF->getFrameInfo();
MachineRegisterInfo *MRI = &MF->getRegInfo();
const X86InstrInfo *TII = Subtarget.getInstrInfo();
int FI = MFI.getFunctionContextIndex();
// Get a mapping of the call site numbers to all of the landing pads they're
// associated with.
DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
unsigned MaxCSNum = 0;
for (auto &MBB : *MF) {
if (!MBB.isEHPad())
continue;
MCSymbol *Sym = nullptr;
for (const auto &MI : MBB) {
if (MI.isDebugInstr())
continue;
assert(MI.isEHLabel() && "expected EH_LABEL");
Sym = MI.getOperand(0).getMCSymbol();
break;
}
if (!MF->hasCallSiteLandingPad(Sym))
continue;
for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
CallSiteNumToLPad[CSI].push_back(&MBB);
MaxCSNum = std::max(MaxCSNum, CSI);
}
}
// Get an ordered list of the machine basic blocks for the jump table.
std::vector<MachineBasicBlock *> LPadList;
SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
LPadList.reserve(CallSiteNumToLPad.size());
for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
for (auto &LP : CallSiteNumToLPad[CSI]) {
LPadList.push_back(LP);
InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
}
}
assert(!LPadList.empty() &&
"No landing pad destinations for the dispatch jump table!");
// Create the MBBs for the dispatch code.
// Shove the dispatch's address into the return slot in the function context.
MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
DispatchBB->setIsEHPad(true);
MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
BuildMI(TrapBB, DL, TII->get(X86::TRAP));
DispatchBB->addSuccessor(TrapBB);
MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
DispatchBB->addSuccessor(DispContBB);
// Insert MBBs.
MF->push_back(DispatchBB);
MF->push_back(DispContBB);
MF->push_back(TrapBB);
// Insert code into the entry block that creates and registers the function
// context.
SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
// Create the jump table and associated information
unsigned JTE = getJumpTableEncoding();
MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
unsigned MJTI = JTI->createJumpTableIndex(LPadList);
const X86RegisterInfo &RI = TII->getRegisterInfo();
// Add a register mask with no preserved registers. This results in all
// registers being marked as clobbered.
if (RI.hasBasePointer(*MF)) {
const bool FPIs64Bit =
Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
MFI->setRestoreBasePointer(MF);
unsigned FP = RI.getFrameRegister(*MF);
unsigned BP = RI.getBaseRegister();
unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
MFI->getRestoreBasePointerOffset())
.addRegMask(RI.getNoPreservedMask());
} else {
BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
.addRegMask(RI.getNoPreservedMask());
}
// IReg is used as an index in a memory operand and therefore can't be SP
unsigned IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
Subtarget.is64Bit() ? 8 : 4);
BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
.addReg(IReg)
.addImm(LPadList.size());
BuildMI(DispatchBB, DL, TII->get(X86::JAE_1)).addMBB(TrapBB);
if (Subtarget.is64Bit()) {
unsigned BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
unsigned IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
// leaq .LJTI0_0(%rip), BReg
BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
.addReg(X86::RIP)
.addImm(1)
.addReg(0)
.addJumpTableIndex(MJTI)
.addReg(0);
// movzx IReg64, IReg
BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
.addImm(0)
.addReg(IReg)
.addImm(X86::sub_32bit);
switch (JTE) {
case MachineJumpTableInfo::EK_BlockAddress:
// jmpq *(BReg,IReg64,8)
BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
.addReg(BReg)
.addImm(8)
.addReg(IReg64)
.addImm(0)
.addReg(0);
break;
case MachineJumpTableInfo::EK_LabelDifference32: {
unsigned OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
unsigned OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
unsigned TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
// movl (BReg,IReg64,4), OReg
BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
.addReg(BReg)
.addImm(4)
.addReg(IReg64)
.addImm(0)
.addReg(0);
// movsx OReg64, OReg
BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
// addq BReg, OReg64, TReg
BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
.addReg(OReg64)
.addReg(BReg);
// jmpq *TReg
BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
break;
}
default:
llvm_unreachable("Unexpected jump table encoding");
}
} else {
// jmpl *.LJTI0_0(,IReg,4)
BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
.addReg(0)
.addImm(4)
.addReg(IReg)
.addJumpTableIndex(MJTI)
.addReg(0);
}
// Add the jump table entries as successors to the MBB.
SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
for (auto &LP : LPadList)
if (SeenMBBs.insert(LP).second)
DispContBB->addSuccessor(LP);
// N.B. the order the invoke BBs are processed in doesn't matter here.
SmallVector<MachineBasicBlock *, 64> MBBLPads;
const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
for (MachineBasicBlock *MBB : InvokeBBs) {
// Remove the landing pad successor from the invoke block and replace it
// with the new dispatch block.
// Keep a copy of Successors since it's modified inside the loop.
SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
MBB->succ_rend());
// FIXME: Avoid quadratic complexity.
for (auto MBBS : Successors) {
if (MBBS->isEHPad()) {
MBB->removeSuccessor(MBBS);
MBBLPads.push_back(MBBS);
}
}
MBB->addSuccessor(DispatchBB);
// Find the invoke call and mark all of the callee-saved registers as
// 'implicit defined' so that they're spilled. This prevents instructions
// from being moved to before the EH block, where they would never be
// executed.
for (auto &II : reverse(*MBB)) {
if (!II.isCall())
continue;
DenseMap<unsigned, bool> DefRegs;
for (auto &MOp : II.operands())
if (MOp.isReg())
DefRegs[MOp.getReg()] = true;
MachineInstrBuilder MIB(*MF, &II);
for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
unsigned Reg = SavedRegs[RI];
if (!DefRegs[Reg])
MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
}
break;
}
}
// Mark all former landing pads as non-landing pads. The dispatch is the only
// landing pad now.
for (auto &LP : MBBLPads)
LP->setIsEHPad(false);
// The instruction is gone now.
MI.eraseFromParent();
return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *BB) const {
MachineFunction *MF = BB->getParent();
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
DebugLoc DL = MI.getDebugLoc();
switch (MI.getOpcode()) {
default: llvm_unreachable("Unexpected instr type to insert");
case X86::TLS_addr32:
case X86::TLS_addr64:
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
return EmitLoweredTLSAddr(MI, BB);
case X86::RETPOLINE_CALL32:
case X86::RETPOLINE_CALL64:
case X86::RETPOLINE_TCRETURN32:
case X86::RETPOLINE_TCRETURN64:
return EmitLoweredRetpoline(MI, BB);
case X86::CATCHRET:
return EmitLoweredCatchRet(MI, BB);
case X86::CATCHPAD:
return EmitLoweredCatchPad(MI, BB);
case X86::SEG_ALLOCA_32:
case X86::SEG_ALLOCA_64:
return EmitLoweredSegAlloca(MI, BB);
case X86::TLSCall_32:
case X86::TLSCall_64:
return EmitLoweredTLSCall(MI, BB);
case X86::CMOV_FR32:
case X86::CMOV_FR64:
case X86::CMOV_F128:
case X86::CMOV_GR8:
case X86::CMOV_GR16:
case X86::CMOV_GR32:
case X86::CMOV_RFP32:
case X86::CMOV_RFP64:
case X86::CMOV_RFP80:
case X86::CMOV_V2F64:
case X86::CMOV_V2I64:
case X86::CMOV_V4F32:
case X86::CMOV_V4F64:
case X86::CMOV_V4I64:
case X86::CMOV_V16F32:
case X86::CMOV_V8F32:
case X86::CMOV_V8F64:
case X86::CMOV_V8I64:
case X86::CMOV_V8I1:
case X86::CMOV_V16I1:
case X86::CMOV_V32I1:
case X86::CMOV_V64I1:
return EmitLoweredSelect(MI, BB);
case X86::RDFLAGS32:
case X86::RDFLAGS64: {
unsigned PushF =
MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
// Permit reads of the EFLAGS and DF registers without them being defined.
// This intrinsic exists to read external processor state in flags, such as
// the trap flag, interrupt flag, and direction flag, none of which are
// modeled by the backend.
assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
"Unexpected register in operand!");
Push->getOperand(2).setIsUndef();
assert(Push->getOperand(3).getReg() == X86::DF &&
"Unexpected register in operand!");
Push->getOperand(3).setIsUndef();
BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
case X86::WRFLAGS32:
case X86::WRFLAGS64: {
unsigned Push =
MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
unsigned PopF =
MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
BuildMI(*BB, MI, DL, TII->get(PopF));
MI.eraseFromParent(); // The pseudo is gone now.
return BB;
}
case X86::RELEASE_FADD32mr:
case X86::RELEASE_FADD64mr:
return EmitLoweredAtomicFP(MI, BB);
case X86::FP32_TO_INT16_IN_MEM:
case X86::FP32_TO_INT32_IN_MEM:
case X86::FP32_TO_INT64_IN_MEM:
case X86::FP64_TO_INT16_IN_MEM:
case X86::FP64_TO_INT32_IN_MEM:
case X86::FP64_TO_INT64_IN_MEM:
case X86::FP80_TO_INT16_IN_MEM:
case X86::FP80_TO_INT32_IN_MEM:
case X86::FP80_TO_INT64_IN_MEM: {
// Change the floating point control register to use "round towards zero"
// mode when truncating to an integer value.
int CWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FNSTCW16m)), CWFrameIdx);
// Load the old value of the control word...
unsigned OldCW =
MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
CWFrameIdx);
// Store a control word that rounds toward zero: 0xC7F masks all FP
// exceptions and sets the rounding-control bits (11:10) to 11b (truncate).
addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
.addImm(0xC7F);
// Reload the modified control word now...
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FLDCW16m)), CWFrameIdx);
// Restore the memory image of the control word to its original value.
addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
.addReg(OldCW);
// Get the X86 opcode to use.
unsigned Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("illegal opcode!");
case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
}
X86AddressMode AM = getAddressFromInstr(&MI, 0);
addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
.addReg(MI.getOperand(X86::AddrNumOperands).getReg());
// Reload the original control word now.
addFrameReference(BuildMI(*BB, MI, DL,
TII->get(X86::FLDCW16m)), CWFrameIdx);
MI.eraseFromParent(); // The pseudo instruction is gone now.
return BB;
}
// Thread synchronization.
case X86::MONITOR:
return emitMonitor(MI, BB, Subtarget, X86::MONITORrrr);
case X86::MONITORX:
return emitMonitor(MI, BB, Subtarget, X86::MONITORXrrr);
// Cache line zero
case X86::CLZERO:
return emitClzero(&MI, BB, Subtarget);
// PKU feature
case X86::WRPKRU:
return emitWRPKRU(MI, BB, Subtarget);
case X86::RDPKRU:
return emitRDPKRU(MI, BB, Subtarget);
// xbegin
case X86::XBEGIN:
return emitXBegin(MI, BB, Subtarget.getInstrInfo());
case X86::VASTART_SAVE_XMM_REGS:
return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
case X86::VAARG_64:
return EmitVAARG64WithCustomInserter(MI, BB);
case X86::EH_SjLj_SetJmp32:
case X86::EH_SjLj_SetJmp64:
return emitEHSjLjSetJmp(MI, BB);
case X86::EH_SjLj_LongJmp32:
case X86::EH_SjLj_LongJmp64:
return emitEHSjLjLongJmp(MI, BB);
case X86::Int_eh_sjlj_setup_dispatch:
return EmitSjLjDispatchBlock(MI, BB);
case TargetOpcode::STATEPOINT:
// As an implementation detail, STATEPOINT shares the STACKMAP format at
// this point in the process. We diverge later.
return emitPatchPoint(MI, BB);
case TargetOpcode::STACKMAP:
case TargetOpcode::PATCHPOINT:
return emitPatchPoint(MI, BB);
case TargetOpcode::PATCHABLE_EVENT_CALL:
return emitXRayCustomEvent(MI, BB);
case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
return emitXRayTypedEvent(MI, BB);
case X86::LCMPXCHG8B: {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
// In addition to the four E[ABCD] registers implied by its encoding,
// CMPXCHG8B requires a memory operand. If the current target is i686 and
// the current function needs a base pointer (which is ESI on i686), the
// register allocator would not be able to allocate registers for an
// address of the form X(%reg, %reg, Y): there would never be enough
// unreserved registers during regalloc (without the need for a base
// pointer the only option would be X(%edi, %esi, Y)). We give the
// register allocator a hand by precomputing the address in a new vreg
// using LEA.
// If the target is not i686 or there is no base pointer, there is nothing
// to do here.
if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
return BB;
// Even though this code does not strictly need the base pointer to be
// ESI, we check for that anyway. The reason: if this assert fails,
// something has changed in the compiler's base pointer handling, and that
// change most probably has to be addressed here as well.
assert(TRI->getBaseRegister() == X86::ESI &&
"LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
"base pointer in mind");
MachineRegisterInfo &MRI = MF->getRegInfo();
MVT SPTy = getPointerTy(MF->getDataLayout());
const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
unsigned computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
X86AddressMode AM = getAddressFromInstr(&MI, 0);
// Regalloc does not need any help when the memory operand of CMPXCHG8B
// does not use index register.
if (AM.IndexReg == X86::NoRegister)
return BB;
// After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
// four operand definitions that are E[ABCD] registers. We skip them and
// then insert the LEA.
MachineBasicBlock::iterator MBBI(MI);
while (MBBI->definesRegister(X86::EAX) || MBBI->definesRegister(X86::EBX) ||
MBBI->definesRegister(X86::ECX) || MBBI->definesRegister(X86::EDX))
--MBBI;
addFullAddress(
BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
setDirectAddressInInstr(&MI, 0, computedAddrVReg);
return BB;
}
case X86::LCMPXCHG16B:
return BB;
case X86::LCMPXCHG8B_SAVE_EBX:
case X86::LCMPXCHG16B_SAVE_RBX: {
unsigned BasePtr =
MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
if (!BB->isLiveIn(BasePtr))
BB->addLiveIn(BasePtr);
return BB;
}
}
}
//===----------------------------------------------------------------------===//
// X86 Optimization Hooks
//===----------------------------------------------------------------------===//
bool
X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
const APInt &Demanded,
TargetLoweringOpt &TLO) const {
// Only optimize Ands to prevent shrinking a constant that could be
// matched by movzx.
if (Op.getOpcode() != ISD::AND)
return false;
EVT VT = Op.getValueType();
// Ignore vectors.
if (VT.isVector())
return false;
unsigned Size = VT.getSizeInBits();
// Make sure the RHS really is a constant.
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
if (!C)
return false;
const APInt &Mask = C->getAPIntValue();
// Clear all non-demanded bits initially.
APInt ShrunkMask = Mask & Demanded;
// Find the width of the shrunk mask.
unsigned Width = ShrunkMask.getActiveBits();
// If the mask is all 0s there's nothing to do here.
if (Width == 0)
return false;
// Find the next power of 2 width, rounding up to a byte.
Width = PowerOf2Ceil(std::max(Width, 8U));
// Truncate the width to size to handle illegal types.
Width = std::min(Width, Size);
// Calculate a possible zero extend mask for this constant.
APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
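// For example, with Mask == 0x3FF and only the low 8 bits demanded:
// ShrunkMask == 0xFF, Width == 8 and ZeroExtendMask == 0xFF, so the AND
// constant is replaced by 0xFF, which movzbl can match.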
// If we aren't changing the mask, just return true to keep it and prevent
// the caller from optimizing.
if (ZeroExtendMask == Mask)
return true;
// Make sure the new mask can be represented by a combination of mask bits
// and non-demanded bits.
if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
return false;
// Replace the constant with the zero extend mask.
SDLoc DL(Op);
SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
return TLO.CombineTo(Op, NewOp);
}
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
unsigned BitWidth = Known.getBitWidth();
unsigned Opc = Op.getOpcode();
EVT VT = Op.getValueType();
assert((Opc >= ISD::BUILTIN_OP_END ||
Opc == ISD::INTRINSIC_WO_CHAIN ||
Opc == ISD::INTRINSIC_W_CHAIN ||
Opc == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
Known.resetAll();
switch (Opc) {
default: break;
case X86ISD::SETCC:
Known.Zero.setBitsFrom(1);
break;
case X86ISD::MOVMSK: {
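// MOVMSK packs one sign bit per source vector element into the low bits
// of the scalar result, so everything above NumLoBits is known zero.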
unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
Known.Zero.setBitsFrom(NumLoBits);
break;
}
case X86ISD::PEXTRB:
case X86ISD::PEXTRW: {
SDValue Src = Op.getOperand(0);
EVT SrcVT = Src.getValueType();
APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
Op.getConstantOperandVal(1));
DAG.computeKnownBits(Src, Known, DemandedElt, Depth + 1);
Known = Known.zextOrTrunc(BitWidth);
Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
break;
}
case X86ISD::VSHLI:
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
Known.setAllZero();
break;
}
DAG.computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
unsigned ShAmt = ShiftImm->getZExtValue();
if (Opc == X86ISD::VSHLI) {
Known.Zero <<= ShAmt;
Known.One <<= ShAmt;
// Low bits are known zero.
Known.Zero.setLowBits(ShAmt);
} else {
Known.Zero.lshrInPlace(ShAmt);
Known.One.lshrInPlace(ShAmt);
// High bits are known zero.
Known.Zero.setHighBits(ShAmt);
}
}
break;
}
case X86ISD::PACKUS: {
// PACKUS is just a truncation if the upper half is zero.
// TODO: Add DemandedElts support.
KnownBits Known2;
DAG.computeKnownBits(Op.getOperand(0), Known, Depth + 1);
DAG.computeKnownBits(Op.getOperand(1), Known2, Depth + 1);
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
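// If the upper half of each (wider) source element is not known zero, the
// unsigned saturation performed by PACKUS may change the value, so the
// truncation assumption does not hold and we must give up.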
if (Known.countMinLeadingZeros() < BitWidth)
Known.resetAll();
Known = Known.trunc(BitWidth);
break;
}
case X86ISD::VZEXT: {
// TODO: Add DemandedElts support.
SDValue N0 = Op.getOperand(0);
unsigned NumElts = VT.getVectorNumElements();
EVT SrcVT = N0.getValueType();
unsigned InNumElts = SrcVT.getVectorNumElements();
unsigned InBitWidth = SrcVT.getScalarSizeInBits();
assert(InNumElts >= NumElts && "Illegal VZEXT input");
Known = KnownBits(InBitWidth);
APInt DemandedSrcElts = APInt::getLowBitsSet(InNumElts, NumElts);
DAG.computeKnownBits(N0, Known, DemandedSrcElts, Depth + 1);
Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBitWidth);
break;
}
case X86ISD::CMOV: {
DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
// If we don't know any bits, early out.
if (Known.isUnknown())
break;
KnownBits Known2;
DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);
// Only known if known in both the LHS and RHS.
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
break;
}
case X86ISD::UDIVREM8_ZEXT_HREG:
// TODO: Support more than just the zero extended bits?
if (Op.getResNo() != 1)
break;
// The remainder is zero extended.
Known.Zero.setBitsFrom(8);
break;
}
// Handle target shuffles.
// TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
if (isTargetShuffle(Opc)) {
bool IsUnary;
SmallVector<int, 64> Mask;
SmallVector<SDValue, 2> Ops;
if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
IsUnary)) {
unsigned NumOps = Ops.size();
unsigned NumElts = VT.getVectorNumElements();
if (Mask.size() == NumElts) {
SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
Known.Zero.setAllBits(); Known.One.setAllBits();
for (unsigned i = 0; i != NumElts; ++i) {
if (!DemandedElts[i])
continue;
int M = Mask[i];
if (M == SM_SentinelUndef) {
// For UNDEF elements, we don't know anything about the common state
// of the shuffle result.
Known.resetAll();
break;
} else if (M == SM_SentinelZero) {
Known.One.clearAllBits();
continue;
}
assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
"Shuffle index out of range");
unsigned OpIdx = (unsigned)M / NumElts;
unsigned EltIdx = (unsigned)M % NumElts;
if (Ops[OpIdx].getValueType() != VT) {
// TODO - handle target shuffle ops with different value types.
Known.resetAll();
break;
}
DemandedOps[OpIdx].setBit(EltIdx);
}
// Known bits are the values that are shared by every demanded element.
for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
if (!DemandedOps[i])
continue;
KnownBits Known2;
DAG.computeKnownBits(Ops[i], Known2, DemandedOps[i], Depth + 1);
Known.One &= Known2.One;
Known.Zero &= Known2.Zero;
}
}
}
}
}
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
unsigned Depth) const {
unsigned VTBits = Op.getScalarValueSizeInBits();
unsigned Opcode = Op.getOpcode();
switch (Opcode) {
case X86ISD::SETCC_CARRY:
// SETCC_CARRY sets the dest to ~0 for true or 0 for false.
return VTBits;
case X86ISD::VSEXT: {
// TODO: Add DemandedElts support.
SDValue Src = Op.getOperand(0);
unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
Tmp += VTBits - Src.getScalarValueSizeInBits();
return Tmp;
}
case X86ISD::VTRUNC: {
// TODO: Add DemandedElts support.
SDValue Src = Op.getOperand(0);
unsigned NumSrcBits = Src.getScalarValueSizeInBits();
assert(VTBits < NumSrcBits && "Illegal truncation input type");
unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
if (Tmp > (NumSrcBits - VTBits))
return Tmp - (NumSrcBits - VTBits);
return 1;
}
case X86ISD::PACKSS: {
// PACKSS is just a truncation if the sign bits extend to the packed size.
// TODO: Add DemandedElts support.
unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth + 1);
unsigned Tmp = std::min(Tmp0, Tmp1);
if (Tmp > (SrcBits - VTBits))
return Tmp - (SrcBits - VTBits);
return 1;
}
case X86ISD::VSHLI: {
SDValue Src = Op.getOperand(0);
APInt ShiftVal = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue();
if (ShiftVal.uge(VTBits))
return VTBits; // Shifted all bits out --> zero.
unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
if (ShiftVal.uge(Tmp))
return 1; // Shifted all sign bits out --> unknown.
return Tmp - ShiftVal.getZExtValue();
}
case X86ISD::VSRAI: {
SDValue Src = Op.getOperand(0);
APInt ShiftVal = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue();
if (ShiftVal.uge(VTBits - 1))
return VTBits; // Sign splat.
unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
ShiftVal += Tmp;
return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
}
case X86ISD::PCMPGT:
case X86ISD::PCMPEQ:
case X86ISD::CMPP:
case X86ISD::VPCOM:
case X86ISD::VPCOMU:
// Vector compares return zero/all-bits result values.
return VTBits;
case X86ISD::CMOV: {
unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
if (Tmp0 == 1) return 1; // Early out.
unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
return std::min(Tmp0, Tmp1);
}
case X86ISD::SDIVREM8_SEXT_HREG:
// TODO: Support more than just the sign extended bits?
if (Op.getResNo() != 1)
break;
// The remainder is sign extended.
return VTBits - 7;
}
// Fallback case.
return 1;
}
SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
return N->getOperand(0);
return N;
}
/// Returns true (and the GlobalValue and the offset) if the node is a
/// GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
const GlobalValue* &GA,
int64_t &Offset) const {
if (N->getOpcode() == X86ISD::Wrapper) {
if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
return true;
}
}
return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
// Attempt to match a combined shuffle mask against supported unary shuffle
// instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
bool AllowFloatDomain, bool AllowIntDomain,
SDValue &V1, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &SrcVT, MVT &DstVT) {
unsigned NumMaskElts = Mask.size();
unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
// Match against a VZEXT_MOVL vXi32 zero-extending instruction.
if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
Shuffle = X86ISD::VZEXT_MOVL;
SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
return true;
}
// Match against a ZERO_EXTEND_VECTOR_INREG/VZEXT instruction.
// TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
(MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
unsigned MaxScale = 64 / MaskEltSize;
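// Try each power-of-2 extension scale. For example, with Scale == 4 a
// mask of the form <0, z, z, z, 1, z, z, z> (z = zero/undef) matches a
// zero-extension of the first two source elements.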
for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
bool Match = true;
unsigned NumDstElts = NumMaskElts / Scale;
for (unsigned i = 0; i != NumDstElts && Match; ++i) {
Match &= isUndefOrEqual(Mask[i * Scale], (int)i);
Match &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
}
if (Match) {
unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
MVT::getIntegerVT(MaskEltSize);
SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits()) {
V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
Shuffle = unsigned(X86ISD::VZEXT);
} else
Shuffle = unsigned(ISD::ZERO_EXTEND_VECTOR_INREG);
DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
DstVT = MVT::getVectorVT(DstVT, NumDstElts);
return true;
}
}
}
// Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
isUndefOrEqual(Mask[0], 0) &&
isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
Shuffle = X86ISD::VZEXT_MOVL;
SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
return true;
}
// Check whether we have SSE3, which lets us use MOVDDUP etc. These
// instructions are no slower than UNPCKLPD but have the option to
// fold the input operand into even an unaligned memory load.
if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
if (!Subtarget.hasAVX2() && isTargetShuffleEquivalent(Mask, {0, 0})) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v2f64;
return true;
}
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v4f32;
return true;
}
}
if (MaskVT.is256BitVector() && AllowFloatDomain) {
assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v4f64;
return true;
}
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v8f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v8f32;
return true;
}
}
if (MaskVT.is512BitVector() && AllowFloatDomain) {
assert(Subtarget.hasAVX512() &&
"AVX512 required for 512-bit vector shuffles");
if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v8f64;
return true;
}
if (isTargetShuffleEquivalent(
Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v16f32;
return true;
}
if (isTargetShuffleEquivalent(
Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v16f32;
return true;
}
}
// Attempt to match against broadcast-from-vector.
if (Subtarget.hasAVX2()) {
SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
SrcVT = DstVT = MaskVT;
Shuffle = X86ISD::VBROADCAST;
return true;
}
}
return false;
}
// Attempt to match a combined shuffle mask against supported unary immediate
// permute instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
const APInt &Zeroable,
bool AllowFloatDomain,
bool AllowIntDomain,
const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &ShuffleVT,
unsigned &PermuteImm) {
unsigned NumMaskElts = Mask.size();
unsigned InputSizeInBits = MaskVT.getSizeInBits();
unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
bool ContainsZeros =
llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
// Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
if (!ContainsZeros && MaskScalarSizeInBits == 64) {
// Check for lane crossing permutes.
if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
// PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
Shuffle = X86ISD::VPERMI;
ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
PermuteImm = getV4X86ShuffleImm(Mask);
return true;
}
if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
SmallVector<int, 4> RepeatedMask;
if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
Shuffle = X86ISD::VPERMI;
ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
PermuteImm = getV4X86ShuffleImm(RepeatedMask);
return true;
}
}
} else if (AllowFloatDomain && Subtarget.hasAVX()) {
// VPERMILPD can permute with a non-repeating shuffle.
Shuffle = X86ISD::VPERMILPI;
ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
PermuteImm = 0;
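// VPERMILPD's immediate uses one bit per f64 element, selecting the low
// or high element within that element's 128-bit lane.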
for (int i = 0, e = Mask.size(); i != e; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef)
continue;
assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
PermuteImm |= (M & 1) << i;
}
return true;
}
}
// Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
// AVX introduced the VPERMILPD/VPERMILPS float permutes; before that we
// had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
!ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
// Narrow the repeated mask to create 32-bit element permutes.
SmallVector<int, 4> WordMask = RepeatedMask;
if (MaskScalarSizeInBits == 64)
scaleShuffleMask<int>(2, RepeatedMask, WordMask);
Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
PermuteImm = getV4X86ShuffleImm(WordMask);
return true;
}
}
// Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
SmallVector<int, 4> RepeatedMask;
if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
ArrayRef<int> LoMask(Mask.data() + 0, 4);
ArrayRef<int> HiMask(Mask.data() + 4, 4);
// PSHUFLW: permute lower 4 elements only.
if (isUndefOrInRange(LoMask, 0, 4) &&
isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
Shuffle = X86ISD::PSHUFLW;
ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
PermuteImm = getV4X86ShuffleImm(LoMask);
return true;
}
// PSHUFHW: permute upper 4 elements only.
if (isUndefOrInRange(HiMask, 4, 8) &&
isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
// Offset the HiMask so that we can create the shuffle immediate.
int OffsetHiMask[4];
for (int i = 0; i != 4; ++i)
OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
Shuffle = X86ISD::PSHUFHW;
ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
return true;
}
}
}
// Attempt to match against byte/bit shifts.
// FIXME: Add 512-bit support.
if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
int ShiftAmt = matchVectorShuffleAsShift(ShuffleVT, Shuffle,
MaskScalarSizeInBits, Mask,
0, Zeroable, Subtarget);
if (0 < ShiftAmt) {
PermuteImm = (unsigned)ShiftAmt;
return true;
}
}
return false;
}
// Attempt to match a combined unary shuffle mask against supported binary
// shuffle instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
bool AllowFloatDomain, bool AllowIntDomain,
SDValue &V1, SDValue &V2, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
bool IsUnary) {
unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
if (MaskVT.is128BitVector()) {
if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
V2 = V1;
V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
V2 = V1;
Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
(AllowFloatDomain || !Subtarget.hasSSE41())) {
std::swap(V1, V2);
Shuffle = X86ISD::MOVSD;
SrcVT = DstVT = MVT::v2f64;
return true;
}
if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
(AllowFloatDomain || !Subtarget.hasSSE41())) {
Shuffle = X86ISD::MOVSS;
SrcVT = DstVT = MVT::v4f32;
return true;
}
}
// Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
// TODO add support for 256/512-bit types.
if ((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) {
if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
Subtarget)) {
DstVT = MaskVT;
return true;
}
}
// Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
(MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
(MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
(MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
DAG, Subtarget)) {
SrcVT = DstVT = MaskVT;
if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
return true;
}
}
return false;
}
static bool matchBinaryPermuteVectorShuffle(
MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
unsigned NumMaskElts = Mask.size();
unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
// Attempt to match against PALIGNR byte rotate.
if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
int ByteRotation = matchVectorShuffleAsByteRotate(MaskVT, V1, V2, Mask);
if (0 < ByteRotation) {
Shuffle = X86ISD::PALIGNR;
ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
PermuteImm = ByteRotation;
return true;
}
}
// Attempt to combine to X86ISD::BLENDI.
if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
(Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
(MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
uint64_t BlendMask = 0;
bool ForceV1Zero = false, ForceV2Zero = false;
SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
if (matchVectorShuffleAsBlend(V1, V2, TargetMask, ForceV1Zero, ForceV2Zero,
BlendMask)) {
if (MaskVT == MVT::v16i16) {
// We can only use v16i16 PBLENDW if the lanes are repeated.
SmallVector<int, 8> RepeatedMask;
if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
RepeatedMask)) {
assert(RepeatedMask.size() == 8 &&
"Repeated mask size doesn't match!");
PermuteImm = 0;
for (int i = 0; i < 8; ++i)
if (RepeatedMask[i] >= 8)
PermuteImm |= 1 << i;
V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
Shuffle = X86ISD::BLENDI;
ShuffleVT = MaskVT;
return true;
}
} else {
// Determine a type compatible with X86ISD::BLENDI.
ShuffleVT = MaskVT;
if (Subtarget.hasAVX2()) {
if (ShuffleVT == MVT::v4i64)
ShuffleVT = MVT::v8i32;
else if (ShuffleVT == MVT::v2i64)
ShuffleVT = MVT::v4i32;
} else {
if (ShuffleVT == MVT::v2i64 || ShuffleVT == MVT::v4i32)
ShuffleVT = MVT::v8i16;
else if (ShuffleVT == MVT::v4i64)
ShuffleVT = MVT::v4f64;
else if (ShuffleVT == MVT::v8i32)
ShuffleVT = MVT::v8f32;
}
if (!ShuffleVT.isFloatingPoint()) {
int Scale = EltSizeInBits / ShuffleVT.getScalarSizeInBits();
BlendMask =
scaleVectorShuffleBlendMask(BlendMask, NumMaskElts, Scale);
ShuffleVT = MVT::getIntegerVT(EltSizeInBits / Scale);
ShuffleVT = MVT::getVectorVT(ShuffleVT, NumMaskElts * Scale);
}
V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
PermuteImm = (unsigned)BlendMask;
Shuffle = X86ISD::BLENDI;
return true;
}
}
}
// Attempt to combine to INSERTPS.
if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
MaskVT.is128BitVector()) {
if (Zeroable.getBoolValue() &&
matchVectorShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
Shuffle = X86ISD::INSERTPS;
ShuffleVT = MVT::v4f32;
return true;
}
}
// Attempt to combine to SHUFPD.
if (AllowFloatDomain && EltSizeInBits == 64 &&
((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
(MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
if (matchVectorShuffleWithSHUFPD(MaskVT, V1, V2, PermuteImm, Mask)) {
Shuffle = X86ISD::SHUFP;
ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
return true;
}
}
// Attempt to combine to SHUFPS.
if (AllowFloatDomain && EltSizeInBits == 32 &&
((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
(MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
(MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
SmallVector<int, 4> RepeatedMask;
if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
// Match each half of the repeated mask to determine whether it is just
// referencing one of the vectors, is zeroable, or is entirely undef.
auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
int M0 = RepeatedMask[Offset];
int M1 = RepeatedMask[Offset + 1];
if (isUndefInRange(RepeatedMask, Offset, 2)) {
return DAG.getUNDEF(MaskVT);
} else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
S0 = (SM_SentinelUndef == M0 ? -1 : 0);
S1 = (SM_SentinelUndef == M1 ? -1 : 1);
return getZeroVector(MaskVT, Subtarget, DAG, DL);
} else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
return V1;
} else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
return V2;
}
return SDValue();
};
int ShufMask[4] = {-1, -1, -1, -1};
SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
if (Lo && Hi) {
V1 = Lo;
V2 = Hi;
Shuffle = X86ISD::SHUFP;
ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
PermuteImm = getV4X86ShuffleImm(ShufMask);
return true;
}
}
}
return false;
}
/// Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
ArrayRef<int> BaseMask, int Depth,
bool HasVariableMask, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
assert((Inputs.size() == 1 || Inputs.size() == 2) &&
"Unexpected number of shuffle inputs!");
// Find the inputs that enter the chain. Note that multiple uses are OK
// here; we're not going to remove the operands we find.
bool UnaryShuffle = (Inputs.size() == 1);
SDValue V1 = peekThroughBitcasts(Inputs[0]);
SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
: peekThroughBitcasts(Inputs[1]));
MVT VT1 = V1.getSimpleValueType();
MVT VT2 = V2.getSimpleValueType();
MVT RootVT = Root.getSimpleValueType();
assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
VT2.getSizeInBits() == RootVT.getSizeInBits() &&
"Vector size mismatch");
SDLoc DL(Root);
SDValue Res;
unsigned NumBaseMaskElts = BaseMask.size();
if (NumBaseMaskElts == 1) {
assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
return DAG.getBitcast(RootVT, V1);
}
unsigned RootSizeInBits = RootVT.getSizeInBits();
unsigned NumRootElts = RootVT.getVectorNumElements();
unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
(RootVT.isFloatingPoint() && Depth >= 2) ||
(RootVT.is256BitVector() && !Subtarget.hasAVX2());
// Don't combine if we are an AVX512/EVEX target and the mask element size
// is different from the root element size - this would prevent writemasks
// from being reused.
// TODO - this currently prevents all lane shuffles from occurring.
// TODO - check for writemasks usage instead of always preventing combining.
// TODO - attempt to narrow Mask back to writemask size.
bool IsEVEXShuffle =
RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
// TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
// Handle 128-bit lane shuffles of 256-bit vectors.
// If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
// we need to use the zeroing feature.
// TODO - this should support binary shuffles.
if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
!(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
!isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128)
return SDValue(); // Nothing to do!
MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
unsigned PermMask = 0;
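// Encode the VPERM2X128 immediate: bits [1:0] select the source 128-bit
// lane for the low half and bits [5:4] for the high half; setting bit 3
// (resp. bit 7) zeroes that half instead.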
PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
Res = DAG.getBitcast(ShuffleVT, V1);
Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
DAG.getUNDEF(ShuffleVT),
DAG.getConstant(PermMask, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
// For masks that have been widened to 128-bit elements or more,
// narrow back down to 64-bit elements.
SmallVector<int, 64> Mask;
if (BaseMaskEltSizeInBits > 64) {
assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
int MaskScale = BaseMaskEltSizeInBits / 64;
scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
} else {
Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
}
unsigned NumMaskElts = Mask.size();
unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
// Determine the effective mask value type.
FloatDomain &= (32 <= MaskEltSizeInBits);
MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
: MVT::getIntegerVT(MaskEltSizeInBits);
MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
// Only allow legal mask types.
if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
return SDValue();
// Attempt to match the mask against known shuffle patterns.
MVT ShuffleSrcVT, ShuffleVT;
unsigned Shuffle, PermuteImm;
// Which shuffle domains are permitted?
// Permit domain crossing at higher combine depths.
bool AllowFloatDomain = FloatDomain || (Depth > 3);
bool AllowIntDomain = (!FloatDomain || (Depth > 3)) && Subtarget.hasSSE2() &&
(!MaskVT.is256BitVector() || Subtarget.hasAVX2());
// Determine zeroable mask elements.
APInt Zeroable(NumMaskElts, 0);
for (unsigned i = 0; i != NumMaskElts; ++i)
if (isUndefOrZero(Mask[i]))
Zeroable.setBit(i);
if (UnaryShuffle) {
// If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load
// directly if we don't shuffle the lower element and we shuffle the upper
// (zero) elements within themselves.
if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
(V1.getScalarValueSizeInBits() % MaskEltSizeInBits) == 0) {
unsigned Scale = V1.getScalarValueSizeInBits() / MaskEltSizeInBits;
ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
return DAG.getBitcast(RootVT, V1);
}
}
SDValue NewV1 = V1; // Save operand in case early exit happens.
if (matchUnaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
NewV1, DL, DAG, Subtarget, Shuffle,
ShuffleSrcVT, ShuffleVT) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
return DAG.getBitcast(RootVT, Res);
}
if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
AllowIntDomain, Subtarget, Shuffle,
ShuffleVT, PermuteImm) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
Res = DAG.getBitcast(ShuffleVT, V1);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
DAG.getConstant(PermuteImm, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
}
SDValue NewV1 = V1; // Save operands in case early exit happens.
SDValue NewV2 = V2;
if (matchBinaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
NewV1, NewV2, DL, DAG, Subtarget, Shuffle,
ShuffleSrcVT, ShuffleVT, UnaryShuffle) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
return DAG.getBitcast(RootVT, Res);
}
NewV1 = V1; // Save operands in case early exit happens.
NewV2 = V2;
if (matchBinaryPermuteVectorShuffle(
MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
(!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 1 && Root.getOpcode() == Shuffle)
return SDValue(); // Nothing to do!
NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
DAG.getConstant(PermuteImm, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
// Typically from here on, we need an integer version of MaskVT.
MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
// Annoyingly, SSE4A instructions don't map into the above match helpers.
if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
uint64_t BitLen, BitIdx;
if (matchVectorShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
Zeroable)) {
if (Depth == 1 && Root.getOpcode() == X86ISD::EXTRQI)
return SDValue(); // Nothing to do!
V1 = DAG.getBitcast(IntMaskVT, V1);
Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
if (matchVectorShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTQI)
return SDValue(); // Nothing to do!
V1 = DAG.getBitcast(IntMaskVT, V1);
V2 = DAG.getBitcast(IntMaskVT, V2);
Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
DAG.getConstant(BitLen, DL, MVT::i8),
DAG.getConstant(BitIdx, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
}
// Don't try to re-form single instruction chains under any circumstances now
// that we've done encoding canonicalization for them.
if (Depth < 2)
return SDValue();
// Depth threshold above which we can efficiently use variable mask shuffles.
int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 2 : 3;
bool AllowVariableMask = (Depth >= VariableShuffleDepth) || HasVariableMask;
bool MaskContainsZeros =
any_of(Mask, [](int M) { return M == SM_SentinelZero; });
if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
// If we have a single input lane-crossing shuffle then lower to VPERMV.
if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
((Subtarget.hasAVX2() &&
(MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
(Subtarget.hasAVX512() &&
(MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
(Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
Res = DAG.getBitcast(MaskVT, V1);
Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
return DAG.getBitcast(RootVT, Res);
}
// Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
// vector as the second source.
if (UnaryShuffle && AllowVariableMask &&
((Subtarget.hasAVX512() &&
(MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
(Subtarget.hasVLX() &&
(MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
(Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
// Adjust shuffle mask - replace SM_SentinelZero with second source index.
for (unsigned i = 0; i != NumMaskElts; ++i)
if (Mask[i] == SM_SentinelZero)
Mask[i] = NumMaskElts + i;
SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
Res = DAG.getBitcast(MaskVT, V1);
SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
return DAG.getBitcast(RootVT, Res);
}
// If we have a dual input lane-crossing shuffle then lower to VPERMV3.
if (AllowVariableMask && !MaskContainsZeros &&
((Subtarget.hasAVX512() &&
(MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
(Subtarget.hasVLX() &&
(MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
(Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
(Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
(Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
(Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
V1 = DAG.getBitcast(MaskVT, V1);
V2 = DAG.getBitcast(MaskVT, V2);
Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
return DAG.getBitcast(RootVT, Res);
}
return SDValue();
}
// See if we can combine a single input shuffle with zeros to a bit-mask,
// which is much simpler than any shuffle.
if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
APInt UndefElts(NumMaskElts, 0);
SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef) {
UndefElts.setBit(i);
continue;
}
if (M == SM_SentinelZero)
continue;
EltBits[i] = AllOnes;
}
SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
Res = DAG.getBitcast(MaskVT, V1);
unsigned AndOpcode =
FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
return DAG.getBitcast(RootVT, Res);
}
// If we have a single-input shuffle with different shuffle patterns in
// the 128-bit lanes, use the variable mask form of VPERMILPS.
// TODO: Combine other mask types at higher depths.
if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
(MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
SmallVector<SDValue, 16> VPermIdx;
for (int M : Mask) {
SDValue Idx =
M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
VPermIdx.push_back(Idx);
}
SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
Res = DAG.getBitcast(MaskVT, V1);
Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
return DAG.getBitcast(RootVT, Res);
}
// With XOP, binary shuffles of 128/256-bit floating point vectors can combine
// to VPERMIL2PD/VPERMIL2PS.
if (AllowVariableMask && Subtarget.hasXOP() &&
(MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
MaskVT == MVT::v8f32)) {
// VPERMIL2 Operation.
// Bits[3] - Match Bit.
// Bits[2:1] - (Per Lane) PD Shuffle Mask.
// Bits[2:0] - (Per Lane) PS Shuffle Mask.
unsigned NumLanes = MaskVT.getSizeInBits() / 128;
unsigned NumEltsPerLane = NumMaskElts / NumLanes;
SmallVector<int, 8> VPerm2Idx;
unsigned M2ZImm = 0;
for (int M : Mask) {
if (M == SM_SentinelUndef) {
VPerm2Idx.push_back(-1);
continue;
}
if (M == SM_SentinelZero) {
M2ZImm = 2;
VPerm2Idx.push_back(8);
continue;
}
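// Map the mask entry to a per-lane source index: elements taken from V2
// (M >= NumMaskElts) are offset by NumEltsPerLane, and f64 indices are
// shifted left by one since VPERMIL2PD reads its selector from bits [2:1].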
int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
VPerm2Idx.push_back(Index);
}
V1 = DAG.getBitcast(MaskVT, V1);
V2 = DAG.getBitcast(MaskVT, V2);
SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
DAG.getConstant(M2ZImm, DL, MVT::i8));
return DAG.getBitcast(RootVT, Res);
}
// If we have 3 or more shuffle instructions or a chain involving a variable
// mask, we can replace them with a single PSHUFB instruction profitably.
// Intel's manuals suggest only using PSHUFB if doing so replaces 5
// instructions, but in practice PSHUFB tends to be *very* fast so we're
// more aggressive.
if (UnaryShuffle && AllowVariableMask &&
((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
(RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
(RootVT.is512BitVector() && Subtarget.hasBWI()))) {
SmallVector<SDValue, 16> PSHUFBMask;
int NumBytes = RootVT.getSizeInBits() / 8;
int Ratio = NumBytes / NumMaskElts;
for (int i = 0; i < NumBytes; ++i) {
int M = Mask[i / Ratio];
if (M == SM_SentinelUndef) {
PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
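// PSHUFB zeroes a destination byte whenever the mask byte has its top bit
// set, so 255 (0xFF) marks a zeroable element.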
if (M == SM_SentinelZero) {
PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
continue;
}
M = Ratio * M + i % Ratio;
assert((M / 16) == (i / 16) && "Lane crossing detected");
PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
Res = DAG.getBitcast(ByteVT, V1);
SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
return DAG.getBitcast(RootVT, Res);
}
// With XOP, if we have a 128-bit binary input shuffle we can always combine
// to VPPERM. We match the depth requirement of PSHUFB: VPPERM is never
// slower than PSHUFB on targets that support both.
if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
// VPPERM Mask Operation
// Bits[4:0] - Byte Index (0 - 31)
// Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
SmallVector<SDValue, 16> VPPERMMask;
int NumBytes = 16;
int Ratio = NumBytes / NumMaskElts;
for (int i = 0; i < NumBytes; ++i) {
int M = Mask[i / Ratio];
if (M == SM_SentinelUndef) {
VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
continue;
}
if (M == SM_SentinelZero) {
VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
continue;
}
M = Ratio * M + i % Ratio;
VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
}
MVT ByteVT = MVT::v16i8;
V1 = DAG.getBitcast(ByteVT, V1);
V2 = DAG.getBitcast(ByteVT, V2);
SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
return DAG.getBitcast(RootVT, Res);
}
// Failed to find any combines.
return SDValue();
}
// Attempt to constant fold all of the constant source ops.
// Returns true if the entire shuffle is folded to a constant.
// TODO: Extend this to merge multiple constant Ops and update the mask.
static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
ArrayRef<int> Mask, SDValue Root,
bool HasVariableMask,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Root.getSimpleValueType();
unsigned SizeInBits = VT.getSizeInBits();
unsigned NumMaskElts = Mask.size();
unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
unsigned NumOps = Ops.size();
// Extract constant bits from each source op.
bool OneUseConstantOp = false;
SmallVector<APInt, 16> UndefEltsOps(NumOps);
SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
for (unsigned i = 0; i != NumOps; ++i) {
SDValue SrcOp = Ops[i];
OneUseConstantOp |= SrcOp.hasOneUse();
if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
RawBitsOps[i]))
return SDValue();
}
// To avoid constant pool bloat, only fold if at least one of the constants
// is only used once or the combined shuffle has included a variable mask
// shuffle.
if (!OneUseConstantOp && !HasVariableMask)
return SDValue();
// Shuffle the constant bits according to the mask.
APInt UndefElts(NumMaskElts, 0);
APInt ZeroElts(NumMaskElts, 0);
APInt ConstantElts(NumMaskElts, 0);
SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
APInt::getNullValue(MaskSizeInBits));
for (unsigned i = 0; i != NumMaskElts; ++i) {
int M = Mask[i];
if (M == SM_SentinelUndef) {
UndefElts.setBit(i);
continue;
} else if (M == SM_SentinelZero) {
ZeroElts.setBit(i);
continue;
}
assert(0 <= M && M < (int)(NumMaskElts * NumOps));
unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
if (SrcUndefElts[SrcMaskIdx]) {
UndefElts.setBit(i);
continue;
}
auto &SrcEltBits = RawBitsOps[SrcOpIdx];
APInt &Bits = SrcEltBits[SrcMaskIdx];
if (!Bits) {
ZeroElts.setBit(i);
continue;
}
ConstantElts.setBit(i);
ConstantBitData[i] = Bits;
}
assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
// Create the constant data.
MVT MaskSVT;
if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
else
MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
SDLoc DL(Root);
SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
return DAG.getBitcast(VT, CstOp);
}
/// Fully generic combining of x86 shuffle instructions.
///
/// This should be the last combine run over the x86 shuffle instructions. Once
/// they have been fully optimized, this will recursively consider all chains
/// of single-use shuffle instructions, build a generic model of the cumulative
/// shuffle operation, and check for simpler instructions which implement this
/// operation. We use this primarily for two purposes:
///
/// 1) Collapse generic shuffles to specialized single instructions when
/// equivalent. In most cases, this is just an encoding size win, but
/// sometimes we will collapse multiple generic shuffles into a single
/// special-purpose shuffle.
/// 2) Look for sequences of shuffle instructions with 3 or more total
/// instructions, and replace them with the slightly more expensive SSSE3
/// PSHUFB instruction if available. We do this as the last combining step
/// to ensure we avoid using PSHUFB if we can implement the shuffle with
/// a suitable short sequence of other instructions. The PSHUFB will either
/// use a register or have to read from memory and so is slightly (but only
/// slightly) more expensive than the other shuffle instructions.
///
/// Because this is inherently a quadratic operation (for each shuffle in
/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
/// This should never be an issue in practice as the shuffle lowering doesn't
/// produce sequences of more than 8 instructions.
///
/// FIXME: We will currently miss some cases where the redundant shuffling
/// would simplify under the threshold for PSHUFB formation because of
/// combine-ordering. To fix this, we should do the redundant instruction
/// combining in this recursive walk.
static SDValue combineX86ShufflesRecursively(
ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
bool HasVariableMask, SelectionDAG &DAG, const X86Subtarget &Subtarget) {
// Bound the depth of our recursive combine because this is ultimately
// quadratic in nature.
const unsigned MaxRecursionDepth = 8;
if (Depth > MaxRecursionDepth)
return SDValue();
// Directly rip through bitcasts to find the underlying operand.
SDValue Op = SrcOps[SrcOpIndex];
Op = peekThroughOneUseBitcasts(Op);
MVT VT = Op.getSimpleValueType();
if (!VT.isVector())
return SDValue(); // Bail if we hit a non-vector.
assert(Root.getSimpleValueType().isVector() &&
"Shuffles operate on vector types!");
assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
"Can only combine shuffles of the same vector register size.");
// Extract target shuffle mask and resolve sentinels and inputs.
SmallVector<int, 64> OpMask;
SmallVector<SDValue, 2> OpInputs;
if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask, DAG))
return SDValue();
assert(OpInputs.size() <= 2 && "Too many shuffle inputs");
SDValue Input0 = (OpInputs.size() > 0 ? OpInputs[0] : SDValue());
SDValue Input1 = (OpInputs.size() > 1 ? OpInputs[1] : SDValue());
// Add the inputs to the Ops list, avoiding duplicates.
SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());
int InputIdx0 = -1, InputIdx1 = -1;
for (int i = 0, e = Ops.size(); i < e; ++i) {
SDValue BC = peekThroughBitcasts(Ops[i]);
if (Input0 && BC == peekThroughBitcasts(Input0))
InputIdx0 = i;
if (Input1 && BC == peekThroughBitcasts(Input1))
InputIdx1 = i;
}
if (Input0 && InputIdx0 < 0) {
InputIdx0 = SrcOpIndex;
Ops[SrcOpIndex] = Input0;
}
if (Input1 && InputIdx1 < 0) {
InputIdx1 = Ops.size();
Ops.push_back(Input1);
}
assert(((RootMask.size() > OpMask.size() &&
RootMask.size() % OpMask.size() == 0) ||
(OpMask.size() > RootMask.size() &&
OpMask.size() % RootMask.size() == 0) ||
OpMask.size() == RootMask.size()) &&
"The smaller number of elements must divide the larger.");
// This function can be performance-critical, so we rely on the power-of-2
// knowledge that we have about the mask sizes to replace div/rem ops with
// bit-masks and shifts.
assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes");
assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
assert((RootRatio == 1 || OpRatio == 1) &&
"Must not have a ratio for both incoming and op masks!");
assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef);
// Merge this shuffle operation's mask into our accumulated mask. Note that
// this shuffle's mask will be the first applied to the input, followed by the
// root mask to get us all the way to the root value arrangement. The reason
// for this order is that we are recursing up the operation chain.
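// For example, with RootMask.size() == 4 and OpMask.size() == 8 we get
// MaskWidth == 8, RootRatio == 2 and OpRatio == 1, so a root mask value of j
// is rescaled below to address the op-width elements 2*j and 2*j+1.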
for (unsigned i = 0; i < MaskWidth; ++i) {
unsigned RootIdx = i >> RootRatioLog2;
if (RootMask[RootIdx] < 0) {
// This is a zero or undef lane, we're done.
Mask[i] = RootMask[RootIdx];
continue;
}
unsigned RootMaskedIdx =
RootRatio == 1
? RootMask[RootIdx]
: (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
// Just insert the scaled root mask value if it references an input other
// than the SrcOp we're currently inserting.
if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
(((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
Mask[i] = RootMaskedIdx;
continue;
}
RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
if (OpMask[OpIdx] < 0) {
// The incoming lanes are zero or undef, it doesn't matter which ones we
// are using.
Mask[i] = OpMask[OpIdx];
continue;
}
// Ok, we have non-zero lanes, map them through to one of the Op's inputs.
unsigned OpMaskedIdx =
OpRatio == 1
? OpMask[OpIdx]
: (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1));
OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
if (OpMask[OpIdx] < (int)OpMask.size()) {
assert(0 <= InputIdx0 && "Unknown target shuffle input");
OpMaskedIdx += InputIdx0 * MaskWidth;
} else {
assert(0 <= InputIdx1 && "Unknown target shuffle input");
OpMaskedIdx += InputIdx1 * MaskWidth;
}
Mask[i] = OpMaskedIdx;
}
// Handle the all undef/zero cases early.
if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
return DAG.getUNDEF(Root.getValueType());
// TODO - should we handle the mixed zero/undef case as well? Just returning
// a zero mask will lose information on undef elements, possibly reducing
// future combine possibilities.
if (all_of(Mask, [](int Idx) { return Idx < 0; }))
return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
SDLoc(Root));
// Remove unused shuffle source ops.
resolveTargetShuffleInputsAndMask(Ops, Mask);
assert(!Ops.empty() && "Shuffle with no inputs detected");
HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
// Update the list of shuffle nodes that have been combined so far.
SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
SrcNodes.end());
CombinedNodes.push_back(Op.getNode());
// See if we can recurse into each shuffle source op (if it's a target
// shuffle). The source op should only be combined if it either has a
// single use (i.e. current Op) or all its users have already been combined.
// Don't recurse if we already have more source ops than we can combine in
// the remaining recursion depth.
if (Ops.size() < (MaxRecursionDepth - Depth)) {
for (int i = 0, e = Ops.size(); i < e; ++i)
if (Ops[i].getNode()->hasOneUse() ||
SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
if (SDValue Res = combineX86ShufflesRecursively(
Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask,
DAG, Subtarget))
return Res;
}
// Attempt to constant fold all of the constant source ops.
if (SDValue Cst = combineX86ShufflesConstants(
Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
return Cst;
// We can only combine unary and binary shuffle mask cases.
if (Ops.size() > 2)
return SDValue();
// Minor canonicalization of the accumulated shuffle mask to make it easier
// to match below. All this does is detect masks with sequential pairs of
// elements, and shrink them to the half-width mask. It does this in a loop
// so it will reduce the size of the mask to the minimal width mask which
// performs an equivalent shuffle.
SmallVector<int, 64> WidenedMask;
while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
Mask = std::move(WidenedMask);
}
// Canonicalization of binary shuffle masks to improve pattern matching by
// commuting the inputs.
if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
ShuffleVectorSDNode::commuteMask(Mask);
std::swap(Ops[0], Ops[1]);
}
// Finally, try to combine into a single shuffle instruction.
return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask, DAG,
Subtarget);
}
/// Get the PSHUF-style mask from a PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
/// PSHUF-style masks that can be reused with such instructions.
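///
/// For example, a v8i16 PSHUFHW with mask <0,1,2,3,5,4,7,6> returns
/// {1, 0, 3, 2} once the high words are extracted and rebased below.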
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
SmallVector<SDValue, 2> Ops;
bool IsUnary;
bool HaveMask =
getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
(void)HaveMask;
assert(HaveMask);
// If we have more than 128-bits, only the low 128-bits of shuffle mask
// matter. Check that the upper masks are repeats and remove them.
if (VT.getSizeInBits() > 128) {
int LaneElts = 128 / VT.getScalarSizeInBits();
#ifndef NDEBUG
for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
for (int j = 0; j < LaneElts; ++j)
assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
"Mask doesn't repeat in high 128-bit lanes!");
#endif
Mask.resize(LaneElts);
}
switch (N.getOpcode()) {
case X86ISD::PSHUFD:
return Mask;
case X86ISD::PSHUFLW:
Mask.resize(4);
return Mask;
case X86ISD::PSHUFHW:
Mask.erase(Mask.begin(), Mask.begin() + 4);
for (int &M : Mask)
M -= 4;
return Mask;
default:
llvm_unreachable("No valid shuffle instruction found!");
}
}
/// Search for a combinable shuffle across a chain ending in pshufd.
///
/// We walk up the chain and look for a combinable shuffle, skipping over
/// shuffles that we could hoist this shuffle's transformation past without
/// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
SelectionDAG &DAG) {
assert(N.getOpcode() == X86ISD::PSHUFD &&
"Called with something other than an x86 128-bit half shuffle!");
SDLoc DL(N);
// Walk up a single-use chain looking for a combinable shuffle. Keep a stack
// of the shuffles in the chain so that we can form a fresh chain to replace
// this one.
SmallVector<SDValue, 8> Chain;
SDValue V = N.getOperand(0);
for (; V.hasOneUse(); V = V.getOperand(0)) {
switch (V.getOpcode()) {
default:
return SDValue(); // Nothing combined!
case ISD::BITCAST:
// Skip bitcasts as we always know the type for the target specific
// instructions.
continue;
case X86ISD::PSHUFD:
// Found another dword shuffle.
break;
case X86ISD::PSHUFLW:
// Check that the low words (being shuffled) are the identity in the
// dword shuffle, and the high words are self-contained.
if (Mask[0] != 0 || Mask[1] != 1 ||
!(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
return SDValue();
Chain.push_back(V);
continue;
case X86ISD::PSHUFHW:
// Check that the high words (being shuffled) are the identity in the
// dword shuffle, and the low words are self-contained.
if (Mask[2] != 2 || Mask[3] != 3 ||
!(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
return SDValue();
Chain.push_back(V);
continue;
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
// For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
// shuffle into a preceding word shuffle.
if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
V.getSimpleValueType().getVectorElementType() != MVT::i16)
return SDValue();
// Search for a half-shuffle which we can combine with.
unsigned CombineOp =
V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
if (V.getOperand(0) != V.getOperand(1) ||
!V->isOnlyUserOf(V.getOperand(0).getNode()))
return SDValue();
Chain.push_back(V);
V = V.getOperand(0);
do {
switch (V.getOpcode()) {
default:
return SDValue(); // Nothing to combine.
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
if (V.getOpcode() == CombineOp)
break;
Chain.push_back(V);
LLVM_FALLTHROUGH;
case ISD::BITCAST:
V = V.getOperand(0);
continue;
}
break;
} while (V.hasOneUse());
break;
}
// Break out of the loop if we break out of the switch.
break;
}
if (!V.hasOneUse())
// We fell out of the loop without finding a viable combining instruction.
return SDValue();
// Merge this node's mask and our incoming mask.
SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
for (int &M : Mask)
M = VMask[M];
V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Rebuild the chain around this new shuffle.
while (!Chain.empty()) {
SDValue W = Chain.pop_back_val();
if (V.getValueType() != W.getOperand(0).getValueType())
V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
switch (W.getOpcode()) {
default:
llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
case X86ISD::UNPCKL:
case X86ISD::UNPCKH:
V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
break;
case X86ISD::PSHUFD:
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
break;
}
}
if (V.getValueType() != N.getValueType())
V = DAG.getBitcast(N.getValueType(), V);
// Return the new chain to replace N.
return V;
}
/// Search for a combinable shuffle across a chain ending in pshuflw or
/// pshufhw.
///
/// We walk up the chain, skipping shuffles of the other half and looking
/// through shuffles which switch halves trying to find a shuffle of the same
/// pair of dwords.
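///
/// For example, starting from (pshuflw (pshufhw (pshuflw x, m0)), m1), the
/// intervening pshufhw only touches the high words, so m1 can be folded into
/// m0 and the outer pshuflw removed.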
static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
assert(
(N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
"Called with something other than an x86 128-bit half shuffle!");
SDLoc DL(N);
unsigned CombineOpcode = N.getOpcode();
// Walk up a single-use chain looking for a combinable shuffle.
SDValue V = N.getOperand(0);
for (; V.hasOneUse(); V = V.getOperand(0)) {
switch (V.getOpcode()) {
default:
return false; // Nothing combined!
case ISD::BITCAST:
// Skip bitcasts as we always know the type for the target specific
// instructions.
continue;
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
if (V.getOpcode() == CombineOpcode)
break;
// Other-half shuffles are no-ops.
continue;
}
// Break out of the loop if we break out of the switch.
break;
}
if (!V.hasOneUse())
// We fell out of the loop without finding a viable combining instruction.
return false;
// Combine away the bottom node as its shuffle will be accumulated into
// a preceding shuffle.
DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
// Record the old value.
SDValue Old = V;
// Merge this node's mask and our incoming mask (adjusted to account for all
// the pshufd instructions encountered).
SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
for (int &M : Mask)
M = VMask[M];
V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
// Check that the shuffles didn't cancel each other out. If not, we need to
// combine to the new one.
if (Old != V)
// Replace the combinable shuffle with the combined one, updating all users
// so that we re-evaluate the chain here.
DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
return true;
}
/// Try to combine x86 target specific shuffles.
static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
MVT VT = N.getSimpleValueType();
SmallVector<int, 4> Mask;
unsigned Opcode = N.getOpcode();
// Combine binary shuffle of 2 similar 'Horizontal' instructions into a
// single instruction.
if (VT.getScalarSizeInBits() == 64 &&
(Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
Opcode == X86ISD::UNPCKL)) {
auto BC0 = peekThroughBitcasts(N.getOperand(0));
auto BC1 = peekThroughBitcasts(N.getOperand(1));
EVT VT0 = BC0.getValueType();
EVT VT1 = BC1.getValueType();
unsigned Opcode0 = BC0.getOpcode();
unsigned Opcode1 = BC1.getOpcode();
if (Opcode0 == Opcode1 && VT0 == VT1 &&
(Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
SDValue Lo, Hi;
if (Opcode == X86ISD::MOVSD) {
Lo = BC1.getOperand(0);
Hi = BC0.getOperand(1);
} else {
Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
}
SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
return DAG.getBitcast(VT, Horiz);
}
}
switch (Opcode) {
case X86ISD::VBROADCAST: {
// If broadcasting from another shuffle, attempt to simplify it.
// TODO - we really need a general SimplifyDemandedVectorElts mechanism.
SDValue Src = N.getOperand(0);
SDValue BC = peekThroughBitcasts(Src);
EVT SrcVT = Src.getValueType();
EVT BCVT = BC.getValueType();
if (isTargetShuffle(BC.getOpcode()) &&
VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
SM_SentinelUndef);
for (unsigned i = 0; i != Scale; ++i)
DemandedMask[i] = i;
if (SDValue Res = combineX86ShufflesRecursively(
{BC}, 0, BC, DemandedMask, {}, /*Depth*/ 1,
/*HasVarMask*/ false, DAG, Subtarget))
return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
DAG.getBitcast(SrcVT, Res));
}
return SDValue();
}
case X86ISD::PSHUFD:
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
Mask = getPSHUFShuffleMask(N);
assert(Mask.size() == 4);
break;
case X86ISD::UNPCKL: {
// Combine X86ISD::UNPCKL and ISD::VECTOR_SHUFFLE into X86ISD::UNPCKH, in
// which X86ISD::UNPCKL has an ISD::UNDEF operand, and ISD::VECTOR_SHUFFLE
// moves upper half elements into the lower half part. For example:
//
// t2: v16i8 = vector_shuffle<8,9,10,11,12,13,14,15,u,u,u,u,u,u,u,u> t1,
// undef:v16i8
// t3: v16i8 = X86ISD::UNPCKL undef:v16i8, t2
//
// will be combined to:
//
// t3: v16i8 = X86ISD::UNPCKH undef:v16i8, t1
// This combine only applies to 128-bit vectors. From SSE4.1 onward the
// pattern may not arise because more advanced shuffles are used instead.
if (!VT.is128BitVector())
return SDValue();
auto Op0 = N.getOperand(0);
auto Op1 = N.getOperand(1);
if (Op0.isUndef() && Op1.getOpcode() == ISD::VECTOR_SHUFFLE) {
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op1.getNode())->getMask();
unsigned NumElts = VT.getVectorNumElements();
SmallVector<int, 8> ExpectedMask(NumElts, -1);
std::iota(ExpectedMask.begin(), ExpectedMask.begin() + NumElts / 2,
NumElts / 2);
auto ShufOp = Op1.getOperand(0);
if (isShuffleEquivalent(Op1, ShufOp, Mask, ExpectedMask))
return DAG.getNode(X86ISD::UNPCKH, DL, VT, N.getOperand(0), ShufOp);
}
return SDValue();
}
case X86ISD::MOVSD:
case X86ISD::MOVSS: {
SDValue N0 = N.getOperand(0);
SDValue N1 = N.getOperand(1);
// Canonicalize scalar FPOps:
// MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
// If commutable, allow OP(N1[0], N0[0]).
unsigned Opcode1 = N1.getOpcode();
if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
Opcode1 == ISD::FDIV) {
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
if (N10 == N0 ||
(N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
if (N10 != N0)
std::swap(N10, N11);
MVT SVT = VT.getVectorElementType();
SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
return DAG.getNode(Opcode, DL, VT, N0, SclVec);
}
}
return SDValue();
}
case X86ISD::INSERTPS: {
assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
SDValue Op0 = N.getOperand(0);
SDValue Op1 = N.getOperand(1);
SDValue Op2 = N.getOperand(2);
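// The 8-bit immediate encodes the source element index in bits [7:6], the
// destination element index in bits [5:4], and the zero mask in bits [3:0].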
unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
unsigned ZeroMask = InsertPSMask & 0xF;
// If we zero out all elements from Op0 then we don't need to reference it.
if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
// If we zero out the element from Op1 then we don't need to reference it.
if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
DAG.getConstant(InsertPSMask, DL, MVT::i8));
// Attempt to merge insertps Op1 with an inner target shuffle node.
SmallVector<int, 8> TargetMask1;
SmallVector<SDValue, 2> Ops1;
if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) {
int M = TargetMask1[SrcIdx];
if (isUndefOrZero(M)) {
// Zero/UNDEF insertion - zero out element and remove dependency.
InsertPSMask |= (1u << DstIdx);
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
// Update insertps mask srcidx and reference the source input directly.
assert(0 <= M && M < 8 && "Shuffle index out of range");
InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
Op1 = Ops1[M < 4 ? 0 : 1];
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
}
// Attempt to merge insertps Op0 with an inner target shuffle node.
SmallVector<int, 8> TargetMask0;
SmallVector<SDValue, 2> Ops0;
if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0))
return SDValue();
bool Updated = false;
bool UseInput00 = false;
bool UseInput01 = false;
for (int i = 0; i != 4; ++i) {
int M = TargetMask0[i];
if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
// No change if element is already zero or the inserted element.
continue;
} else if (isUndefOrZero(M)) {
// If the target mask is undef/zero then we must zero the element.
InsertPSMask |= (1u << i);
Updated = true;
continue;
}
// The input element must come from the same position in one of the inputs.
if (M != i && M != (i + 4))
return SDValue();
// Determine which inputs of the target shuffle we're using.
UseInput00 |= (0 <= M && M < 4);
UseInput01 |= (4 <= M);
}
// If we're not using both inputs of the target shuffle then use the
// referenced input directly.
if (UseInput00 && !UseInput01) {
Updated = true;
Op0 = Ops0[0];
} else if (!UseInput00 && UseInput01) {
Updated = true;
Op0 = Ops0[1];
}
if (Updated)
return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
DAG.getConstant(InsertPSMask, DL, MVT::i8));
return SDValue();
}
default:
return SDValue();
}
// Nuke no-op shuffles that show up after combining.
if (isNoopShuffleMask(Mask))
return N.getOperand(0);
// Look for simplifications involving one or two shuffle instructions.
SDValue V = N.getOperand(0);
switch (N.getOpcode()) {
default:
break;
case X86ISD::PSHUFLW:
case X86ISD::PSHUFHW:
assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
return SDValue(); // We combined away this shuffle, so we're done.
// See if this reduces to a PSHUFD which is no more expensive and can
// combine with more operations. Note that it has to at least flip the
// dwords as otherwise it would have been removed as a no-op.
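// For example, pshuflw <2,3,0,1> swaps the two low dwords, which is exactly
// pshufd <1,0,2,3> on the same input.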
if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
int DMask[] = {0, 1, 2, 3};
int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
DMask[DOffset + 0] = DOffset + 1;
DMask[DOffset + 1] = DOffset + 0;
MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
V = DAG.getBitcast(DVT, V);
V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
return DAG.getBitcast(VT, V);
}
// Look for shuffle patterns which can be implemented as a single unpack.
// FIXME: This doesn't handle the location of the PSHUFD generically, and
// only works when we have a PSHUFD followed by two half-shuffles.
if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
(V.getOpcode() == X86ISD::PSHUFLW ||
V.getOpcode() == X86ISD::PSHUFHW) &&
V.getOpcode() != N.getOpcode() &&
V.hasOneUse()) {
SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
int WordMask[8];
for (int i = 0; i < 4; ++i) {
WordMask[i + NOffset] = Mask[i] + NOffset;
WordMask[i + VOffset] = VMask[i] + VOffset;
}
// Map the word mask through the DWord mask.
int MappedMask[8];
for (int i = 0; i < 8; ++i)
MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
// We can replace all three shuffles with an unpack.
V = DAG.getBitcast(VT, D.getOperand(0));
return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
: X86ISD::UNPCKH,
DL, VT, V, V);
}
}
}
break;
case X86ISD::PSHUFD:
if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
return NewN;
break;
}
return SDValue();
}
/// Checks if the shuffle mask takes subsequent elements
/// alternately from two vectors.
/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
int ParitySrc[2] = {-1, -1};
unsigned Size = Mask.size();
for (unsigned i = 0; i != Size; ++i) {
int M = Mask[i];
if (M < 0)
continue;
// Make sure we are using the matching element from the input.
if ((M % Size) != i)
return false;
// Make sure we use the same input for all elements of the same parity.
int Src = M / Size;
if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
return false;
ParitySrc[i % 2] = Src;
}
// Make sure each input is used.
if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
return false;
Op0Even = ParitySrc[0] == 0;
return true;
}
/// Returns true iff the shuffle node \p N can be replaced with an ADDSUB
/// (SUBADD) operation. If true is returned, the operands of the ADDSUB
/// (SUBADD) operation are written to the parameters \p Opnd0 and \p Opnd1.
///
/// We combine shuffles to ADDSUB (SUBADD) directly on the abstract vector
/// shuffle nodes so it is easier to generically match. We also insert dummy
/// vector shuffle nodes for the operands which explicitly discard the lanes
/// which are unused by this operation to try to propagate through the rest of
/// the combiner the fact that they're unused.
static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
bool &IsSubAdd) {
EVT VT = N->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
!VT.getSimpleVT().isFloatingPoint())
return false;
// We only handle target-independent shuffles.
// FIXME: It would be easy and harmless to use the target shuffle mask
// extraction tool to support more.
if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return false;
SDValue V1 = N->getOperand(0);
SDValue V2 = N->getOperand(1);
// Make sure we have an FADD and an FSUB.
if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
(V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
V1.getOpcode() == V2.getOpcode())
return false;
// If there are other uses of these operations we can't fold them.
if (!V1->hasOneUse() || !V2->hasOneUse())
return false;
// Ensure that both operations have the same operands. Note that we can
// commute the FADD operands.
SDValue LHS, RHS;
if (V1.getOpcode() == ISD::FSUB) {
LHS = V1->getOperand(0); RHS = V1->getOperand(1);
if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
(V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
return false;
} else {
assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
LHS = V2->getOperand(0); RHS = V2->getOperand(1);
if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
(V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
return false;
}
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
bool Op0Even;
if (!isAddSubOrSubAddMask(Mask, Op0Even))
return false;
// It's a subadd if the vector in the even parity is an FADD.
IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
: V2->getOpcode() == ISD::FADD;
Opnd0 = LHS;
Opnd1 = RHS;
return true;
}
/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
static SDValue combineShuffleToFMAddSub(SDNode *N,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
// We only handle target-independent shuffles.
// FIXME: It would be easy and harmless to use the target shuffle mask
// extraction tool to support more.
if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
return SDValue();
MVT VT = N->getSimpleValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
return SDValue();
// We're trying to match (shuffle (fma a, b, c), (X86Fmsub a, b, c)).
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDValue FMAdd = Op0, FMSub = Op1;
if (FMSub.getOpcode() != X86ISD::FMSUB)
std::swap(FMAdd, FMSub);
if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
FMAdd.getOperand(2) != FMSub.getOperand(2))
return SDValue();
// Check for correct shuffle mask.
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
bool Op0Even;
if (!isAddSubOrSubAddMask(Mask, Op0Even))
return SDValue();
// FMAddSub takes zeroth operand from FMSub node.
SDLoc DL(N);
bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
FMAdd.getOperand(2));
}
/// Try to combine a shuffle into a target-specific add-sub or
/// mul-add-sub node.
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
return V;
SDValue Opnd0, Opnd1;
bool IsSubAdd;
if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
return SDValue();
MVT VT = N->getSimpleValueType(0);
SDLoc DL(N);
// Try to generate X86ISD::FMADDSUB node here.
SDValue Opnd2;
if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
}
if (IsSubAdd)
return SDValue();
// Do not generate X86ISD::ADDSUB node for 512-bit types even though
// the ADDSUB idiom has been successfully recognized. There are no known
// X86 targets with 512-bit ADDSUB instructions!
if (VT.is512BitVector())
return SDValue();
return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
// We are looking for a shuffle where both sources are concatenations of a
// half-output-width vector with undef. AVX2 has VPERMD/Q, so if we can express
// this as a single-source shuffle, that's preferable.
static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
return SDValue();
EVT VT = N->getValueType(0);
// We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
if (!VT.is128BitVector() && !VT.is256BitVector())
return SDValue();
if (VT.getVectorElementType() != MVT::i32 &&
VT.getVectorElementType() != MVT::i64 &&
VT.getVectorElementType() != MVT::f32 &&
VT.getVectorElementType() != MVT::f64)
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Check that both sources are concats with undef.
if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
!N1.getOperand(1).isUndef())
return SDValue();
// Construct the new shuffle mask. Elements from the first source retain their
// index, but elements from the second source no longer need to skip an undef.
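// For example, with a v4i32 output, mask element 4 (the first element of the
// second concat) becomes 2, its position within (concat_vectors t1, t2).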
SmallVector<int, 8> Mask;
int NumElts = VT.getVectorNumElements();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
for (int Elt : SVOp->getMask())
Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
SDLoc DL(N);
SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
N1.getOperand(0));
return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
}
/// Eliminate a redundant shuffle of a horizontal math op.
static SDValue foldShuffleOfHorizOp(SDNode *N) {
if (N->getOpcode() != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
return SDValue();
SDValue HOp = N->getOperand(0);
if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
return SDValue();
// 128-bit horizontal math instructions are defined to operate on adjacent
// lanes of each operand as:
// v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
// ...similarly for v2f64 and v8i16.
// TODO: Handle UNDEF operands.
if (HOp.getOperand(0) != HOp.getOperand(1))
return SDValue();
// When the operands of a horizontal math op are identical, the low half of
// the result is the same as the high half. If the shuffle is also replicating
// low and high halves, we don't need the shuffle.
// shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
// TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
// but this should be tied to whatever horizontal op matching and shuffle
// canonicalization are producing.
if (HOp.getValueSizeInBits() == 128 &&
(isTargetShuffleEquivalent(Mask, {0, 0}) ||
isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
return HOp;
if (HOp.getValueSizeInBits() == 256 &&
(isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
isTargetShuffleEquivalent(
Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
return HOp;
return SDValue();
}
static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// If we have legalized the vector types, look for blends of FADD and FSUB
// nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
if (TLI.isTypeLegal(VT)) {
if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
return AddSub;
if (SDValue HAddSub = foldShuffleOfHorizOp(N))
return HAddSub;
}
// During Type Legalization, when promoting illegal vector types,
// the backend might introduce new shuffle dag nodes and bitcasts.
//
// This code performs the following transformation:
// fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
// (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
//
// We do this only if both the bitcast and the BINOP dag nodes have
// one use. Also, perform this transformation only if the new binary
// operation is legal. This is to avoid introducing dag nodes that
// potentially need to be further expanded (or custom lowered) into a
// less optimal sequence of dag nodes.
if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
N->getOpcode() == ISD::VECTOR_SHUFFLE &&
N->getOperand(0).getOpcode() == ISD::BITCAST &&
N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue BC0 = N0.getOperand(0);
EVT SVT = BC0.getValueType();
unsigned Opcode = BC0.getOpcode();
unsigned NumElts = VT.getVectorNumElements();
if (BC0.hasOneUse() && SVT.isVector() &&
SVT.getVectorNumElements() * 2 == NumElts &&
TLI.isOperationLegal(Opcode, VT)) {
bool CanFold = false;
switch (Opcode) {
default : break;
case ISD::ADD:
case ISD::SUB:
case ISD::MUL:
// isOperationLegal lies for integer ops on floating point types.
CanFold = VT.isInteger();
break;
case ISD::FADD:
case ISD::FSUB:
case ISD::FMUL:
// isOperationLegal lies for floating point ops on integer types.
CanFold = VT.isFloatingPoint();
break;
}
unsigned SVTNumElts = SVT.getVectorNumElements();
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
CanFold = SVOp->getMaskElt(i) < 0;
if (CanFold) {
SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0));
SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1));
SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask());
}
}
}
// Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
// load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
// consecutive, non-overlapping, and in the right order.
SmallVector<SDValue, 16> Elts;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
Elts.push_back(Elt);
continue;
}
Elts.clear();
break;
}
if (Elts.size() == VT.getVectorNumElements())
if (SDValue LD =
EltsFromConsecutiveLoads(VT, Elts, dl, DAG, Subtarget, true))
return LD;
// For AVX2, we sometimes want to combine
// (vector_shuffle <mask> (concat_vectors t1, undef)
// (concat_vectors t2, undef))
// Into:
// (vector_shuffle <mask> (concat_vectors t1, t2), undef)
// Since the latter can be efficiently lowered with VPERMD/VPERMQ
if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
return ShufConcat;
if (isTargetShuffle(N->getOpcode())) {
SDValue Op(N, 0);
if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
return Shuffle;
// Try recursively combining arbitrary sequences of x86 shuffle
// instructions into higher-order shuffles. We do this after combining
// specific PSHUF instruction sequences into their minimal form so that we
// can evaluate how many specialized shuffle instructions are involved in
// a particular chain.
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, DAG, Subtarget))
return Res;
}
return SDValue();
}
/// Check if a vector extract from a target-specific shuffle of a load can be
/// folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
SDValue InVec = N->getOperand(0);
SDValue EltNo = N->getOperand(1);
EVT EltVT = N->getValueType(0);
if (!isa<ConstantSDNode>(EltNo))
return SDValue();
EVT OriginalVT = InVec.getValueType();
// Peek through bitcasts, don't duplicate a load with other uses.
InVec = peekThroughOneUseBitcasts(InVec);
EVT CurrentVT = InVec.getValueType();
if (!CurrentVT.isVector() ||
CurrentVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
return SDValue();
if (!isTargetShuffle(InVec.getOpcode()))
return SDValue();
// Don't duplicate a load with other uses.
if (!InVec.hasOneUse())
return SDValue();
SmallVector<int, 16> ShuffleMask;
SmallVector<SDValue, 2> ShuffleOps;
bool UnaryShuffle;
if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
ShuffleOps, ShuffleMask, UnaryShuffle))
return SDValue();
// Select the input vector, guarding against an out-of-range extract index.
unsigned NumElems = CurrentVT.getVectorNumElements();
int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
int Idx = (Elt >= (int)NumElems) ? SM_SentinelUndef : ShuffleMask[Elt];
if (Idx == SM_SentinelZero)
return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
: DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
if (Idx == SM_SentinelUndef)
return DAG.getUNDEF(EltVT);
assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range");
SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0]
: ShuffleOps[1];
// If both inputs to the shuffle are the same operand, allow 2 uses.
unsigned AllowedUses =
(ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
if (LdNode.getOpcode() == ISD::BITCAST) {
// Don't duplicate a load with other uses.
if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
return SDValue();
AllowedUses = 1; // only allow 1 load use if we have a bitcast
LdNode = LdNode.getOperand(0);
}
if (!ISD::isNormalLoad(LdNode.getNode()))
return SDValue();
LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
return SDValue();
// If there's a bitcast before the shuffle, check if the load type and
// alignment are valid.
unsigned Align = LN0->getAlignment();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
EltVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
return SDValue();
// All checks passed, so transform back to vector_shuffle and let the DAG
// combiner finish the job.
SDLoc dl(N);
// Create the shuffle node, taking into account the case that it's a unary
// shuffle.
SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1];
Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle,
ShuffleMask);
Shuffle = DAG.getBitcast(OriginalVT, Shuffle);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
EltNo);
}
// Try to match patterns such as
// (i16 bitcast (v16i1 x))
// ->
// (i16 movmsk (16i8 sext (v16i1 x)))
// before the illegal vector is scalarized on subtargets that don't have legal
// vxi1 types.
static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast,
const X86Subtarget &Subtarget) {
EVT VT = BitCast.getValueType();
SDValue N0 = BitCast.getOperand(0);
EVT VecVT = N0->getValueType(0);
if (!VT.isScalarInteger() || !VecVT.isSimple())
return SDValue();
// With AVX512 vxi1 types are legal and we prefer using k-regs.
// MOVMSK is supported in SSE2 or later.
if (Subtarget.hasAVX512() || !Subtarget.hasSSE2())
return SDValue();
// There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
// v8f64. So all legal 128-bit and 256-bit vectors are covered except for
// v8i16 and v16i16.
// For these two cases, we can shuffle the upper element bytes to a
// consecutive sequence at the start of the vector and treat the results as
// v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
// for v16i16 this is not the case, because the shuffle is expensive, so we
// avoid sign-extending to this type entirely.
// For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
// (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
MVT SExtVT;
MVT FPCastVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
switch (VecVT.getSimpleVT().SimpleTy) {
default:
return SDValue();
case MVT::v2i1:
SExtVT = MVT::v2i64;
FPCastVT = MVT::v2f64;
break;
case MVT::v4i1:
SExtVT = MVT::v4i32;
FPCastVT = MVT::v4f32;
// For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
// sign-extend to a 256-bit operation to avoid truncation.
if (N0->getOpcode() == ISD::SETCC && Subtarget.hasAVX() &&
N0->getOperand(0).getValueType().is256BitVector()) {
SExtVT = MVT::v4i64;
FPCastVT = MVT::v4f64;
}
break;
case MVT::v8i1:
SExtVT = MVT::v8i16;
// For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
// sign-extend to a 256-bit operation to match the compare.
// If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
// 256-bit because the shuffle is cheaper than sign extending the result of
// the compare.
if (N0->getOpcode() == ISD::SETCC && Subtarget.hasAVX() &&
(N0->getOperand(0).getValueType().is256BitVector() ||
N0->getOperand(0).getValueType().is512BitVector())) {
SExtVT = MVT::v8i32;
FPCastVT = MVT::v8f32;
}
break;
case MVT::v16i1:
SExtVT = MVT::v16i8;
// For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
// it is not profitable to sign-extend to 256-bit because this will
// require an extra cross-lane shuffle which is more expensive than
// truncating the result of the compare to 128-bits.
break;
case MVT::v32i1:
SExtVT = MVT::v32i8;
break;
}
SDLoc DL(BitCast);
SDValue V = DAG.getSExtOrTrunc(N0, DL, SExtVT);
if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) {
V = getPMOVMSKB(DL, V, DAG, Subtarget);
return DAG.getZExtOrTrunc(V, DL, VT);
}
if (SExtVT == MVT::v8i16) {
assert(16 == DAG.ComputeNumSignBits(V) && "Expected all/none bit vector");
V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
DAG.getUNDEF(MVT::v8i16));
} else
assert(SExtVT.getScalarType() != MVT::i16 &&
"Vectors of i16 must be packed");
if (FPCastVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
V = DAG.getBitcast(FPCastVT, V);
V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
return DAG.getZExtOrTrunc(V, DL, VT);
}
// Convert a vXi1 constant build vector to the same width scalar integer.
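// For example, a v4i1 build vector <1,0,1,1> becomes the i4 constant 0b1101,
// with bit i of the result holding element i.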
static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
EVT SrcVT = Op.getValueType();
assert(SrcVT.getVectorElementType() == MVT::i1 &&
"Expected a vXi1 vector");
assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
"Expected a constant build vector");
APInt Imm(SrcVT.getVectorNumElements(), 0);
for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Op.getOperand(Idx);
if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
Imm.setBit(Idx);
}
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
return DAG.getConstant(Imm, SDLoc(Op), IntVT);
}
static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
if (!DCI.isBeforeLegalizeOps())
return SDValue();
// Only do this if we have k-registers.
if (!Subtarget.hasAVX512())
return SDValue();
EVT DstVT = N->getValueType(0);
SDValue Op = N->getOperand(0);
EVT SrcVT = Op.getValueType();
if (!Op.hasOneUse())
return SDValue();
// Look for logic ops.
if (Op.getOpcode() != ISD::AND &&
Op.getOpcode() != ISD::OR &&
Op.getOpcode() != ISD::XOR)
return SDValue();
// Make sure we have a bitcast between mask registers and a scalar type.
if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
DstVT.isScalarInteger()) &&
!(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
SrcVT.isScalarInteger()))
return SDValue();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
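// bitcast(op(bitcast(X), Y)) --> op(X, bitcast(Y)) when X already has the
// destination type; the symmetric case handles a bitcast on the RHS.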
if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
LHS.getOperand(0).getValueType() == DstVT)
return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
DAG.getBitcast(DstVT, RHS));
if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
RHS.getOperand(0).getValueType() == DstVT)
return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
// If the RHS is a vXi1 build vector, this is a good reason to flip too.
// Most of these have to move a constant from the scalar domain anyway.
if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
RHS = combinevXi1ConstantToInteger(RHS, DAG);
return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
DAG.getBitcast(DstVT, LHS), RHS);
}
return SDValue();
}
static SDValue createMMXBuildVector(SDValue N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
unsigned NumElts = N.getNumOperands();
auto *BV = cast<BuildVectorSDNode>(N);
SDValue Splat = BV->getSplatValue();
// Build MMX element from integer GPR or SSE float values.
auto CreateMMXElement = [&](SDValue V) {
if (V.isUndef())
return DAG.getUNDEF(MVT::x86mmx);
if (V.getValueType().isFloatingPoint()) {
if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
V = DAG.getBitcast(MVT::v2i64, V);
return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
}
V = DAG.getBitcast(MVT::i32, V);
} else {
V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
}
return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
};
// Convert build vector ops to MMX data in the bottom elements.
SmallVector<SDValue, 8> Ops;
// Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
if (Splat) {
if (Splat.isUndef())
return DAG.getUNDEF(MVT::x86mmx);
Splat = CreateMMXElement(Splat);
if (Subtarget.hasSSE1()) {
// Unpack v8i8 to splat i8 elements to lowest 16-bits.
if (NumElts == 8)
Splat = DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
Splat);
// Use PSHUFW to repeat 16-bit elements.
unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
return DAG.getNode(
ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
DAG.getConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32), Splat,
DAG.getConstant(ShufMask, DL, MVT::i8));
}
Ops.append(NumElts, Splat);
} else {
for (unsigned i = 0; i != NumElts; ++i)
Ops.push_back(CreateMMXElement(N.getOperand(i)));
}
// Use tree of PUNPCKLs to build up general MMX vector.
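// For example, a v8i8 build vector is merged pairwise: punpcklbw combines the
// 8 byte elements into 4 words, punpcklwd combines those into 2 dwords, and a
// final punpckldq yields the single 64-bit value.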
while (Ops.size() > 1) {
unsigned NumOps = Ops.size();
unsigned IntrinOp =
(NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
: (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
: Intrinsic::x86_mmx_punpcklbw));
SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
for (unsigned i = 0; i != NumOps; i += 2)
Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
Ops[i], Ops[i + 1]);
Ops.resize(NumOps / 2);
}
return Ops[0];
}
static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SrcVT = N0.getValueType();
// Try to match patterns such as
// (i16 bitcast (v16i1 x))
// ->
// (i16 movmsk (16i8 sext (v16i1 x)))
// before the setcc result is scalarized on subtargets that don't have legal
// vxi1 types.
if (DCI.isBeforeLegalize()) {
if (SDValue V = combineBitcastvxi1(DAG, SDValue(N, 0), Subtarget))
return V;
// If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
// type, widen both sides to avoid a trip through memory.
if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
Subtarget.hasAVX512()) {
SDLoc dl(N);
N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
N0 = DAG.getBitcast(MVT::v8i1, N0);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
DAG.getIntPtrConstant(0, dl));
}
// Likewise for the reverse direction: if this is a bitcast from a
// MVT::v4i1/v2i1 type to an illegal integer type, widen both sides to avoid
// a trip through memory.
if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
Subtarget.hasAVX512()) {
SDLoc dl(N);
unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
Ops[0] = N0;
N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
N0 = DAG.getBitcast(MVT::i8, N0);
return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
}
}
// Since MMX types are special and don't usually play with other vector types,
// it's better to handle them early to be sure we emit efficient code by
// avoiding store-load conversions.
if (VT == MVT::x86mmx) {
// Detect MMX constant vectors.
APInt UndefElts;
SmallVector<APInt, 1> EltBits;
if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
SDLoc DL(N0);
// Handle zero-extension of i32 with MOVD.
if (EltBits[0].countLeadingZeros() >= 32)
return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
// Else, bitcast to a double.
// TODO - investigate supporting sext 32-bit immediates on x86_64.
APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
}
// Detect bitcasts to x86mmx low word.
if (N0.getOpcode() == ISD::BUILD_VECTOR &&
(SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
bool LowUndef = true, AllUndefOrZero = true;
for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
SDValue Op = N0.getOperand(i);
LowUndef &= Op.isUndef() || (i >= e/2);
AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
}
if (AllUndefOrZero) {
SDValue N00 = N0.getOperand(0);
SDLoc dl(N00);
N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
: DAG.getZExtOrTrunc(N00, dl, MVT::i32);
return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
}
}
// Detect bitcasts of 64-bit build vectors and convert to a
// MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
// lowest element.
if (N0.getOpcode() == ISD::BUILD_VECTOR &&
(SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
SrcVT == MVT::v8i8))
return createMMXBuildVector(N0, DAG, Subtarget);
// Detect bitcasts between element or subvector extraction to x86mmx.
if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
isNullConstant(N0.getOperand(1))) {
SDValue N00 = N0.getOperand(0);
if (N00.getValueType().is128BitVector())
return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
DAG.getBitcast(MVT::v2i64, N00));
}
// Detect bitcasts from FP_TO_SINT to x86mmx.
if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
SDLoc DL(N0);
SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
DAG.getUNDEF(MVT::v2i32));
return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
DAG.getBitcast(MVT::v2i64, Res));
}
}
// Try to remove a bitcast of a constant vXi1 vector. We have to legalize
// most of these to scalar anyway.
if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
return combinevXi1ConstantToInteger(N0, DAG);
}
if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
isa<ConstantSDNode>(N0)) {
auto *C = cast<ConstantSDNode>(N0);
if (C->isAllOnesValue())
return DAG.getConstant(1, SDLoc(N0), VT);
if (C->isNullValue())
return DAG.getConstant(0, SDLoc(N0), VT);
}
// Try to remove bitcasts from input and output of mask arithmetic to
// remove GPR<->K-register crossings.
if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
return V;
// Convert a bitcasted integer logic operation that has one bitcasted
// floating-point operand into a floating-point logic operation. This may
// create a load of a constant, but that is cheaper than materializing the
// constant in an integer register and transferring it to an SSE register or
// transferring the SSE operand to integer register and back.
unsigned FPOpcode;
switch (N0.getOpcode()) {
case ISD::AND: FPOpcode = X86ISD::FAND; break;
case ISD::OR: FPOpcode = X86ISD::FOR; break;
case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
default: return SDValue();
}
if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
(Subtarget.hasSSE2() && VT == MVT::f64)))
return SDValue();
SDValue LogicOp0 = N0.getOperand(0);
SDValue LogicOp1 = N0.getOperand(1);
SDLoc DL0(N0);
// bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
!isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
}
// bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
!isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
}
return SDValue();
}
// Match a binop + shuffle pyramid that represents a horizontal reduction over
// the elements of a vector.
// Returns the vector that is being reduced on, or SDValue() if a reduction
// was not matched.
static SDValue matchBinOpReduction(SDNode *Extract, unsigned &BinOp,
ArrayRef<ISD::NodeType> CandidateBinOps) {
// The pattern must end in an extract from index 0.
if ((Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT) ||
!isNullConstant(Extract->getOperand(1)))
return SDValue();
SDValue Op = Extract->getOperand(0);
unsigned Stages = Log2_32(Op.getValueType().getVectorNumElements());
// Match against one of the candidate binary ops.
if (llvm::none_of(CandidateBinOps, [Op](ISD::NodeType BinOp) {
return Op.getOpcode() == unsigned(BinOp);
}))
return SDValue();
// At each stage, we're looking for something that looks like:
// %s = shufflevector <8 x i32> %op, <8 x i32> undef,
// <8 x i32> <i32 2, i32 3, i32 undef, i32 undef,
// i32 undef, i32 undef, i32 undef, i32 undef>
// %a = binop <8 x i32> %op, %s
// Where the mask changes according to the stage. E.g. for a 3-stage pyramid,
// we expect something like:
// <4,5,6,7,u,u,u,u>
// <2,3,u,u,u,u,u,u>
// <1,u,u,u,u,u,u,u>
unsigned CandidateBinOp = Op.getOpcode();
for (unsigned i = 0; i < Stages; ++i) {
if (Op.getOpcode() != CandidateBinOp)
return SDValue();
ShuffleVectorSDNode *Shuffle =
dyn_cast<ShuffleVectorSDNode>(Op.getOperand(0).getNode());
if (Shuffle) {
Op = Op.getOperand(1);
} else {
Shuffle = dyn_cast<ShuffleVectorSDNode>(Op.getOperand(1).getNode());
Op = Op.getOperand(0);
}
// The first operand of the shuffle should be the same as the other operand
// of the binop.
if (!Shuffle || Shuffle->getOperand(0) != Op)
return SDValue();
// Verify the shuffle has the expected (at this stage of the pyramid) mask.
for (int Index = 0, MaskEnd = 1 << i; Index < MaskEnd; ++Index)
if (Shuffle->getMaskElt(Index) != MaskEnd + Index)
return SDValue();
}
BinOp = CandidateBinOp;
return Op;
}
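// Worked example (illustration): for a v4i32 ADD reduction, Stages =
// Log2_32(4) = 2, and the walk down from the extract matches masks
// <1,u,u,u> (i = 0, MaskEnd = 1) and then <2,3,u,u> (i = 1, MaskEnd = 2);
// i.e. for the DAG built from
//   %s1 = shuffle %v,  undef, <2,3,u,u>
//   %a1 = add %v, %s1
//   %s0 = shuffle %a1, undef, <1,u,u,u>
//   %a0 = add %a1, %s0
//   %r  = extractelement %a0, 0
// this returns %v as the vector being reduced.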
// Given a select, detect the following pattern:
// 1: %2 = zext <N x i8> %0 to <N x i32>
// 2: %3 = zext <N x i8> %1 to <N x i32>
// 3: %4 = sub nsw <N x i32> %2, %3
// 4: %5 = icmp sgt <N x i32> %4, [0 x N] or [-1 x N]
// 5: %6 = sub nsw <N x i32> zeroinitializer, %4
// 6: %7 = select <N x i1> %5, <N x i32> %4, <N x i32> %6
// This is useful as it is the input into a SAD pattern.
static bool detectZextAbsDiff(const SDValue &Select, SDValue &Op0,
SDValue &Op1) {
// Check that the condition of the select is a greater-than or less-than
// comparison.
SDValue SetCC = Select->getOperand(0);
if (SetCC.getOpcode() != ISD::SETCC)
return false;
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
if (CC != ISD::SETGT && CC != ISD::SETLT)
return false;
SDValue SelectOp1 = Select->getOperand(1);
SDValue SelectOp2 = Select->getOperand(2);
// The following instructions assume SelectOp1 is the subtraction operand
// and SelectOp2 is the negation operand.
// In the case of SETLT this is the other way around.
if (CC == ISD::SETLT)
std::swap(SelectOp1, SelectOp2);
// The second operand of the select should be the negation of the first
// operand, which is implemented as 0 - SelectOp1.
if (!(SelectOp2.getOpcode() == ISD::SUB &&
ISD::isBuildVectorAllZeros(SelectOp2.getOperand(0).getNode()) &&
SelectOp2.getOperand(1) == SelectOp1))
return false;
// The first operand of SetCC is the first operand of the select, which is the
// difference between the two input vectors.
if (SetCC.getOperand(0) != SelectOp1)
return false;
// In the SETLT case, the second operand of the comparison can be either 1 or 0.
APInt SplatVal;
if ((CC == ISD::SETLT) &&
!((ISD::isConstantSplatVector(SetCC.getOperand(1).getNode(), SplatVal) &&
SplatVal.isOneValue()) ||
(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()))))
return false;
// In the SETGT case, the second operand of the comparison can be either -1 or 0.
if ((CC == ISD::SETGT) &&
!(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()) ||
ISD::isBuildVectorAllOnes(SetCC.getOperand(1).getNode())))
return false;
// The first operand of the select is the difference between the two input
// vectors.
if (SelectOp1.getOpcode() != ISD::SUB)
return false;
Op0 = SelectOp1.getOperand(0);
Op1 = SelectOp1.getOperand(1);
// Check if the operands of the sub are zero-extended from vectors of i8.
if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
Op1.getOpcode() != ISD::ZERO_EXTEND ||
Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
return false;
return true;
}
// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
// to these zexts.
static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
const SDValue &Zext1, const SDLoc &DL,
const X86Subtarget &Subtarget) {
// Find the appropriate width for the PSADBW.
EVT InVT = Zext0.getOperand(0).getValueType();
unsigned RegSize = std::max(128u, InVT.getSizeInBits());
// "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
// fill in the missing vector elements with 0.
unsigned NumConcat = RegSize / InVT.getSizeInBits();
SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
Ops[0] = Zext0.getOperand(0);
MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
Ops[0] = Zext1.getOperand(0);
SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
// Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
};
MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
PSADBWBuilder);
}
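// Worked example (illustration): for v4i8 inputs, InVT is 32 bits wide, so
// RegSize = max(128, 32) = 128 and NumConcat = 4; each input is concatenated
// with three v4i8 zero vectors into a v16i8, and the PSADBW is built as
// v2i64 (RegSize / 64 = 2 lanes).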
// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
// PHMINPOSUW.
static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Bail without SSE41.
if (!Subtarget.hasSSE41())
return SDValue();
EVT ExtractVT = Extract->getValueType(0);
if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
return SDValue();
// Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
unsigned BinOp;
SDValue Src = matchBinOpReduction(
Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN});
if (!Src)
return SDValue();
EVT SrcVT = Src.getValueType();
EVT SrcSVT = SrcVT.getScalarType();
if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
return SDValue();
SDLoc DL(Extract);
SDValue MinPos = Src;
// First, reduce the source down to 128-bit, applying BinOp to lo/hi.
while (SrcVT.getSizeInBits() > 128) {
unsigned NumElts = SrcVT.getVectorNumElements();
unsigned NumSubElts = NumElts / 2;
SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
unsigned SubSizeInBits = SrcVT.getSizeInBits();
SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
}
assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
(SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
"Unexpected value type");
// PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
// to flip the value accordingly.
SDValue Mask;
unsigned MaskEltsBits = ExtractVT.getSizeInBits();
if (BinOp == ISD::SMAX)
Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
else if (BinOp == ISD::SMIN)
Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
else if (BinOp == ISD::UMAX)
Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
if (Mask)
MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
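// The XOR with these masks is an involution that maps the wanted ordering
// onto unsigned-min order: e.g. for i16 SMAX, x ^ 0x7FFF reverses the signed
// order into an unsigned order, so the UMIN performed by PHMINPOSUW finds
// the signed maximum; the second XOR below restores the original value
// (illustration).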
// For v16i8 cases we need to perform UMIN on pairs of byte elements,
// shuffling each upper element down and inserting zeros. This means that the
// v16i8 UMIN will leave the upper element as zero, performing the
// zero-extension ready for the PHMINPOS.
if (ExtractVT == MVT::i8) {
SDValue Upper = DAG.getVectorShuffle(
SrcVT, DL, MinPos, getZeroVector(MVT::v16i8, Subtarget, DAG, DL),
{1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
}
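// Worked example (illustration): if MinPos holds bytes b0..b15, Upper is
// <b1,0,b3,0,...,b15,0>, so the UMIN yields <min(b0,b1),0,min(b2,b3),0,...>;
// each i16 lane is then the zero-extended minimum of a byte pair, ready for
// the v8i16 PHMINPOSUW below.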
// Perform the PHMINPOS on a v8i16 vector.
MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
MinPos = DAG.getBitcast(SrcVT, MinPos);
if (Mask)
MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
DAG.getIntPtrConstant(0, DL));
}
// Attempt to replace an all_of/any_of style horizontal reduction with a MOVMSK.
static SDValue combineHorizontalPredicateResult(SDNode *Extract,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Bail without SSE2 or with AVX512VL (which uses predicate registers).
if (!Subtarget.hasSSE2() || Subtarget.hasVLX())
return SDValue();
EVT ExtractVT = Extract->getValueType(0);
unsigned BitWidth = ExtractVT.getSizeInBits();
if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
ExtractVT != MVT::i8)
return SDValue();
// Check for OR(any_of) and AND(all_of) horizontal reduction patterns.
unsigned BinOp = 0;
SDValue Match = matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
if (!Match)
return SDValue();
// EXTRACT_VECTOR_ELT can require implicit extension of the vector element
// which we can't support here for now.
if (Match.getScalarValueSizeInBits() != BitWidth)
return SDValue();
// We require AVX2 for PMOVMSKB for v16i16/v32i8.
unsigned MatchSizeInBits = Match.getValueSizeInBits();
if (!(MatchSizeInBits == 128 ||
(MatchSizeInBits == 256 &&
((Subtarget.hasAVX() && BitWidth >= 32) || Subtarget.hasAVX2()))))
return SDValue();
// Don't bother performing this for 2-element vectors.
if (Match.getValueType().getVectorNumElements() <= 2)
return SDValue();
// Check that we are extracting a reduction of all sign bits.
if (DAG.ComputeNumSignBits(Match) != BitWidth)
return SDValue();
// For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
MVT MaskVT;
if (64 == BitWidth || 32 == BitWidth)
MaskVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
MatchSizeInBits / BitWidth);
else
MaskVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
APInt CompareBits;
ISD::CondCode CondCode;
if (BinOp == ISD::OR) {
// any_of -> MOVMSK != 0
CompareBits = APInt::getNullValue(32);
CondCode = ISD::CondCode::SETNE;
} else {
// all_of -> MOVMSK == ((1 << NumElts) - 1)
CompareBits = APInt::getLowBitsSet(32, MaskVT.getVectorNumElements());
CondCode = ISD::CondCode::SETEQ;
}
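// Worked example (illustration): an all_of over v4i32 uses MOVMSKPS, which
// packs the four sign bits into bits 0-3 of a GPR, so CompareBits = 0b1111
// with SETEQ; an any_of only needs MOVMSK != 0.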
// Perform the select as i32/i64 and then truncate to avoid partial register
// stalls.
unsigned ResWidth = std::max(BitWidth, 32u);
EVT ResVT = EVT::getIntegerVT(*DAG.getContext(), ResWidth);
SDLoc DL(Extract);
SDValue Zero = DAG.getConstant(0, DL, ResVT);
SDValue Ones = DAG.getAllOnesConstant(DL, ResVT);
SDValue Res = DAG.getBitcast(MaskVT, Match);
Res = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Res);
Res = DAG.getSelectCC(DL, Res, DAG.getConstant(CompareBits, DL, MVT::i32),
Ones, Zero, CondCode);
return DAG.getSExtOrTrunc(Res, DL, ExtractVT);
}
static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// PSADBW is only supported on SSE2 and up.
if (!Subtarget.hasSSE2())
return SDValue();
// Verify the vector we're extracting from has an integer element type wider
// than i16.
EVT VT = Extract->getOperand(0).getValueType();
if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
return SDValue();
unsigned RegSize = 128;
if (Subtarget.useBWIRegs())
RegSize = 512;
else if (Subtarget.hasAVX())
RegSize = 256;
// We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
// TODO: We should be able to handle larger vectors by splitting them before
// feeding them into several SADs, and then reducing over those.
if (RegSize / VT.getVectorNumElements() < 8)
return SDValue();
// Match shuffle + add pyramid.
unsigned BinOp = 0;
SDValue Root = matchBinOpReduction(Extract, BinOp, {ISD::ADD});
// The operand is expected to be zero extended from i8
// (verified in detectZextAbsDiff).
// In order to convert to i64 and above, an additional any/zero/sign
// extend is expected.
// A zero extend from 32 bits has no mathematical effect on the result,
// and the sign extend is effectively a zero extend as well
// (it extends the sign bit, which is zero).
// So it is correct to skip the sign/zero extend instruction.
if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
Root.getOpcode() == ISD::ZERO_EXTEND ||
Root.getOpcode() == ISD::ANY_EXTEND))
Root = Root.getOperand(0);
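// (The absolute differences are at most 255 and PSADBW sums at most 8 of
// them per lane, so every intermediate value fits well below 32 bits and
// the skipped extend cannot change the result -- illustration.)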
// If there was a match, we want Root to be a select that is the root of an
// abs-diff pattern.
if (!Root || (Root.getOpcode() != ISD::VSELECT))
return SDValue();
// Check whether we have an abs-diff pattern feeding into the select.
SDValue Zext0, Zext1;
if (!detectZextAbsDiff(Root, Zext0, Zext1))
return SDValue();
// Create the SAD instruction.
SDLoc DL(Extract);
SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
// If the original vector was wider than 8 elements, sum over the results
// in the SAD vector.
unsigned Stages = Log2_32(VT.getVectorNumElements());
MVT SadVT = SAD.getSimpleValueType();
if (Stages > 3) {
unsigned SadElems = SadVT.getVectorNumElements();
for(unsigned i = Stages - 3; i > 0; --i) {
SmallVector<int, 16> Mask(SadElems, -1);
for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
Mask[j] = MaskEnd + j;
SDValue Shuffle =
DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
}
}
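// Worked example (illustration): a v32i8 input with AVX2 yields a v4i64 SAD
// and Stages = 5; the loop runs i = 2, 1 with shuffle masks <2,3,u,u> and
// then <1,u,u,u>, summing the partial SADs down into lane 0.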
MVT Type = Extract->getSimpleValueType(0);
unsigned TypeSizeInBits = Type.getSizeInBits();
// Return the lowest TypeSizeInBits bits.
MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
SAD = DAG.getBitcast(ResVT, SAD);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
Extract->getOperand(1));
}
// Attempt to peek through a target shuffle and extract the scalar from the
// source.
static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
SDValue Src = N->getOperand(0);
SDValue Idx = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT SrcVT = Src.getValueType();
EVT SrcSVT = SrcVT.getVectorElementType();
unsigned NumSrcElts = SrcVT.getVectorNumElements();
// Don't attempt this for boolean mask vectors or unknown extraction indices.
if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
return SDValue();
// Handle extract(broadcast(scalar_value)); it doesn't matter what the index is.
if (X86ISD::VBROADCAST == Src.getOpcode() &&
Src.getOperand(0).getValueType() == VT)
return Src.getOperand(0);
// Resolve the target shuffle inputs and mask.
SmallVector<int, 16> Mask;
SmallVector<SDValue, 2> Ops;
if (!resolveTargetShuffleInputs(peekThroughBitcasts(Src), Ops, Mask, DAG))
return SDValue();
// Attempt to narrow/widen the shuffle mask to the correct size.
if (Mask.size() != NumSrcElts) {
if ((NumSrcElts % Mask.size()) == 0) {
SmallVector<int, 16> ScaledMask;
int Scale = NumSrcElts / Mask.size();
scaleShuffleMask<int>(Scale, Mask, ScaledMask);
Mask = std::move(ScaledMask);
} else if ((Mask.size() % NumSrcElts) == 0) {
SmallVector<int, 16> WidenedMask;
while (Mask.size() > NumSrcElts &&
canWidenShuffleElements(Mask, WidenedMask))
Mask = std::move(WidenedMask);
// TODO - investigate support for wider shuffle masks with known upper
// undef/zero elements for implicit zero-extension.
}
}
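// Worked example (illustration): extracting from a v4i32 source whose
// shuffle resolved to a 2-element mask <1,0> scales by
// NumSrcElts / Mask.size() = 2, giving <2,3,0,1>, so each i64 lane is
// re-expressed in terms of i32 elements.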
// Check if narrowing/widening failed.
if (Mask.size() != NumSrcElts)
return SDValue();
int SrcIdx = Mask[N->getConstantOperandVal(1)];
SDLoc dl(N);
// If the shuffle source element is undef/zero then we can just accept it.
if (SrcIdx == SM_SentinelUndef)
return DAG.getUNDEF(VT);
if (SrcIdx == SM_SentinelZero)
return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
: DAG.getConstant(0, dl, VT);
SDValue SrcOp = Ops[SrcIdx / Mask.size()];
SrcOp = DAG.getBitcast(SrcVT, SrcOp);
SrcIdx = SrcIdx % Mask.size();
// We can only extract other elements from 128-bit vectors and in certain
// circumstances, depending on SSE-level.
// TODO: Investigate using extract_subvector for larger vectors.
// TODO: Investigate float/double extraction if it will be just stored.
if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
assert(SrcSVT == VT && "Unexpected extraction type");
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
DAG.getIntPtrConstant(SrcIdx, dl));
}
if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
(SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
"Unexpected extraction type");
unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
DAG.getIntPtrConstant(SrcIdx, dl));
return DAG.getZExtOrTrunc(ExtOp, dl, VT);
}
return SDValue();
}
/// Detect vector gather/scatter index generation and convert it from being a
/// bunch of shuffles and extracts into a somewhat faster sequence.
/// For i686, the best sequence is apparently storing the value and loading
/// scalars back, while for x64 we should use 64-bit extracts and shifts.
static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
return NewOp;
// TODO - Remove this once we can handle the implicit zero-extension of
// X86ISD::PEXTRW/X86ISD::PEXTRB in:
// XFormVExtractWithShuffleIntoLoad, combineHorizontalPredicateResult and
// combineBasicSADPattern.
if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
return NewOp;
SDValue InputVector = N->getOperand(0);
SDValue EltIdx = N->getOperand(1);
EVT SrcVT = InputVector.getValueType();
EVT VT = N->getValueType(0);
SDLoc dl(InputVector);
// Detect mmx extraction of all bits as an i64. It works better as a bitcast.
if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
SDValue MMXSrc = InputVector.getOperand(0);
// The bitcast source is a direct mmx result.
if (MMXSrc.getValueType() == MVT::x86mmx)
return DAG.getBitcast(VT, InputVector);
}
// Detect mmx to i32 conversion through a v2i32 elt extract.
if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
SDValue MMXSrc = InputVector.getOperand(0);
// The bitcast source is a direct mmx result.
if (MMXSrc.getValueType() == MVT::x86mmx)
return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
}
if (VT == MVT::i1 && InputVector.getOpcode() == ISD::BITCAST &&
isa<ConstantSDNode>(EltIdx) &&
isa<ConstantSDNode>(InputVector.getOperand(0))) {
uint64_t ExtractedElt = N->getConstantOperandVal(1);
auto *InputC = cast<ConstantSDNode>(InputVector.getOperand(0));
const APInt &InputValue = InputC->getAPIntValue();
uint64_t Res = InputValue[ExtractedElt];
return DAG.getConstant(Res, dl, MVT::i1);
}
// Check whether this extract is the root of a sum of absolute differences
// pattern. This has to be done here because we really want it to happen
// pre-legalization.
if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
return SAD;
// Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
return Cmp;
// Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
return MinMax;
return SDValue();
}
/// If a vector select has an operand that is -1 or 0, try to simplify the
/// select to a bitwise logic operation.
/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
static SDValue
combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue Cond = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
EVT VT = LHS.getValueType();
EVT CondVT = Cond.getValueType();
SDLoc DL(N);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (N->getOpcode() != ISD::VSELECT)
return SDValue();
assert(CondVT.isVector() && "Vector select expects a vector selector!");
bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
// Check if the first operand is all zeros and Cond type is vXi1.
// This situation only applies to AVX512.
if (TValIsAllZeros && Subtarget.hasAVX512() && Cond.hasOneUse() &&
CondVT.getVectorElementType() == MVT::i1) {
// Invert the cond to not(cond) : xor(op,allones)=not(op)
SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
// Vselect cond, op1, op2 = Vselect not(cond), op2, op1
return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
}
// To use the condition operand as a bitwise mask, it must have elements that
// are the same size as the select elements. I.e., the condition operand must
// have already been promoted from the IR select condition type <N x i1>.
// Don't check if the types themselves are equal because that excludes
// vector floating-point selects.
if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
return SDValue();
bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
// Try to invert the condition if true value is not all 1s and false value is
// not all 0s.
if (!TValIsAllOnes && !FValIsAllZeros &&
// Check if the selector will be produced by CMPP*/PCMP*.
Cond.getOpcode() == ISD::SETCC &&
// Check if SETCC has already been promoted.
TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
CondVT) {
bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
if (TValIsAllZeros || FValIsAllOnes) {
SDValue CC = Cond.getOperand(2);
ISD::CondCode NewCC =
ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
Cond.getOperand(0).getValueType().isInteger());
Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
NewCC);
std::swap(LHS, RHS);
TValIsAllOnes = FValIsAllOnes;
FValIsAllZeros = TValIsAllZeros;
}
}
// Cond value must be 'sign splat' to be converted to a logical op.
if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
return SDValue();
// vselect Cond, 111..., 000... -> Cond
if (TValIsAllOnes && FValIsAllZeros)
return DAG.getBitcast(VT, Cond);
if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
return SDValue();
// vselect Cond, 111..., X -> or Cond, X
if (TValIsAllOnes) {
SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
return DAG.getBitcast(VT, Or);
}
// vselect Cond, X, 000... -> and Cond, X
if (FValIsAllZeros) {
SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
return DAG.getBitcast(VT, And);
}
// vselect Cond, 000..., X -> andn Cond, X
if (TValIsAllZeros) {
MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
return DAG.getBitcast(VT, AndN);
}
return SDValue();
}
static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
SDValue Cond = N->getOperand(0);
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
SDLoc DL(N);
auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
if (!TrueC || !FalseC)
return SDValue();
// Don't do this for crazy integer types.
EVT VT = N->getValueType(0);
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
// We're going to use the condition bit in math or logic ops. We could allow
// this with a wider condition value (post-legalization it becomes an i8),
// but if nothing is creating selects that late, it doesn't matter.
if (Cond.getValueType() != MVT::i1)
return SDValue();
// A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
// 3, 5, or 9 with i32/i64, so those get transformed too.
// TODO: For constants that overflow or do not differ by power-of-2 or small
// multiplier, convert to 'and' + 'add'.
const APInt &TrueVal = TrueC->getAPIntValue();
const APInt &FalseVal = FalseC->getAPIntValue();
bool OV;
APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
if (OV)
return SDValue();
APInt AbsDiff = Diff.abs();
if (AbsDiff.isPowerOf2() ||
((VT == MVT::i32 || VT == MVT::i64) &&
(AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
// We need a positive multiplier constant for shift/LEA codegen. The 'not'
// of the condition can usually be folded into a compare predicate, but even
// without that, the sequence should be cheaper than a CMOV alternative.
if (TrueVal.slt(FalseVal)) {
Cond = DAG.getNOT(DL, Cond, MVT::i1);
std::swap(TrueC, FalseC);
}
// select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
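// e.g. (illustration): select Cond, 12, 4 becomes (zext(Cond) << 3) + 4,
// and select Cond, 7, 2 has AbsDiff == 5, which an i32/i64 LEA can scale
// before adding the base.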
SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
// Multiply condition by the difference if non-one.
if (!AbsDiff.isOneValue())
R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
// Add the base if non-zero.
if (!FalseC->isNullValue())
R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
return R;
}
return SDValue();
}
/// If this is a *dynamic* select (non-constant condition) and we can match
/// this node with one of the variable blend instructions, restructure the
/// condition so that blends can use the high (sign) bit of each element.
static SDValue combineVSelectToShrunkBlend(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue Cond = N->getOperand(0);
if (N->getOpcode() != ISD::VSELECT ||
ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
// Don't optimize before the condition has been transformed to a legal type
// and don't ever optimize vector selects that map to AVX512 mask-registers.
unsigned BitWidth = Cond.getScalarValueSizeInBits();
if (BitWidth < 8 || BitWidth > 64)
return SDValue();
// We can only handle the cases where VSELECT is directly legal on the
// subtarget. We custom lower VSELECT nodes with constant conditions and
// this makes it hard to see whether a dynamic VSELECT will correctly
// lower, so we both check the operation's status and explicitly handle the
// cases where a *dynamic* blend will fail even though a constant-condition
// blend could be custom lowered.
// FIXME: We should find a better way to handle this class of problems.
// Potentially, we should combine constant-condition vselect nodes
// pre-legalization into shuffles and not mark as many types as custom
// lowered.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = N->getValueType(0);
if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
return SDValue();
// FIXME: We don't support i16-element blends currently. We could and
// should support them by making *all* the bits in the condition be set
// rather than just the high bit and using an i8-element blend.
if (VT.getVectorElementType() == MVT::i16)
return SDValue();
// Dynamic blending was only available from SSE4.1 onward.
if (VT.is128BitVector() && !Subtarget.hasSSE41())
return SDValue();
// Byte blends are only available in AVX2.
if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
return SDValue();
// There are no 512-bit blend instructions that use sign bits.
if (VT.is512BitVector())
return SDValue();
// TODO: Add other opcodes eventually lowered into BLEND.
for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
UI != UE; ++UI)
if (UI->getOpcode() != ISD::VSELECT || UI.getOperandNo() != 0)
return SDValue();
APInt DemandedMask(APInt::getSignMask(BitWidth));
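// The variable blend instructions (e.g. BLENDVPS/PBLENDVB) only read the
// sign bit of each condition element, so every lower bit is a "don't care"
// for the demanded-bits simplification below (illustration).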
KnownBits Known;
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
return SDValue();
// If we changed the computation somewhere in the DAG, this change will
// affect all users of Cond. Update all the nodes so that we do not use
// the generic VSELECT anymore. Otherwise, we may perform wrong
// optimizations as we messed with the actual expectation for the vector
// boolean values.
for (SDNode *U : Cond->uses()) {
SDValue SB = DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(U), U->getValueType(0),
Cond, U->getOperand(1), U->getOperand(2));
DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
}
DCI.CommitTargetLoweringOpt(TLO);
return SDValue(N, 0);
}
/// Do target-specific dag combines on SELECT and VSELECT nodes.
static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue Cond = N->getOperand(0);
// Get the LHS/RHS of the select.
SDValue LHS = N->getOperand(1);
SDValue RHS = N->getOperand(2);
EVT VT = LHS.getValueType();
EVT CondVT = Cond.getValueType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// Convert vselects with constant condition into shuffles.
if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
DCI.isBeforeLegalizeOps()) {
SmallVector<int, 64> Mask(VT.getVectorNumElements(), -1);
for (int i = 0, Size = Mask.size(); i != Size; ++i) {
SDValue CondElt = Cond->getOperand(i);
Mask[i] = i;
// Arbitrarily choose from the 2nd operand if the select condition element
// is undef.
// TODO: Can we do better by matching patterns such as even/odd?
if (CondElt.isUndef() || isNullConstant(CondElt))
Mask[i] += Size;
}
return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
}
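// Worked example (illustration): a v4i32 condition of <-1,0,-1,0> produces
// the shuffle mask <0,5,2,7>: true lanes select from LHS (index i) and
// false/undef lanes from RHS (index i + Size).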
// If we have SSE[12] support, try to form min/max nodes. SSE min/max
// instructions match the semantics of the common C idiom x<y?x:y but not
// x<=y?x:y, because of how they handle negative zero (which can be
// ignored in unsafe-math mode).
// We also try to create v2f32 min/max nodes, which we later widen to v4f32.
if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
VT != MVT::f80 && VT != MVT::f128 &&
(TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
(Subtarget.hasSSE2() ||
(Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
unsigned Opcode = 0;
// Check for x CC y ? x : y.
if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
DAG.isEqualTo(RHS, Cond.getOperand(1))) {
switch (CC) {
default: break;
case ISD::SETULT:
// Converting this to a min would handle NaNs incorrectly, and swapping
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZeroFloat(LHS) ||
DAG.isKnownNeverZeroFloat(RHS)))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMIN;
break;
case ISD::SETOLE:
// Converting this to a min would handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
break;
Opcode = X86ISD::FMIN;
break;
case ISD::SETULE:
// Converting this to a min would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOLT:
case ISD::SETLT:
case ISD::SETLE:
Opcode = X86ISD::FMIN;
break;
case ISD::SETOGE:
// Converting this to a max would handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
break;
Opcode = X86ISD::FMAX;
break;
case ISD::SETUGT:
// Converting this to a max would handle NaNs incorrectly, and swapping
// the operands would cause it to handle comparisons between positive
// and negative zero incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZeroFloat(LHS) ||
DAG.isKnownNeverZeroFloat(RHS)))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMAX;
break;
case ISD::SETUGE:
// Converting this to a max would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOGT:
case ISD::SETGT:
case ISD::SETGE:
Opcode = X86ISD::FMAX;
break;
}
// Check for x CC y ? y : x -- a min/max with reversed arms.
} else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
DAG.isEqualTo(RHS, Cond.getOperand(0))) {
switch (CC) {
default: break;
case ISD::SETOGE:
// Converting this to a min would handle comparisons between positive
// and negative zero incorrectly, and swapping the operands would
// cause it to handle NaNs incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!(DAG.isKnownNeverZeroFloat(LHS) ||
DAG.isKnownNeverZeroFloat(RHS))) {
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMIN;
break;
case ISD::SETUGT:
// Converting this to a min would handle NaNs incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
(!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
break;
Opcode = X86ISD::FMIN;
break;
case ISD::SETUGE:
// Converting this to a min would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOGT:
case ISD::SETGT:
case ISD::SETGE:
Opcode = X86ISD::FMIN;
break;
case ISD::SETULT:
// Converting this to a max would handle NaNs incorrectly.
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
Opcode = X86ISD::FMAX;
break;
case ISD::SETOLE:
// Converting this to a max would handle comparisons between positive
// and negative zero incorrectly, and swapping the operands would
// cause it to handle NaNs incorrectly.
if (!DAG.getTarget().Options.UnsafeFPMath &&
!DAG.isKnownNeverZeroFloat(LHS) &&
!DAG.isKnownNeverZeroFloat(RHS)) {
if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
break;
std::swap(LHS, RHS);
}
Opcode = X86ISD::FMAX;
break;
case ISD::SETULE:
// Converting this to a max would handle both negative zeros and NaNs
// incorrectly, but we can swap the operands to fix both.
std::swap(LHS, RHS);
LLVM_FALLTHROUGH;
case ISD::SETOLT:
case ISD::SETLT:
case ISD::SETLE:
Opcode = X86ISD::FMAX;
break;
}
}
if (Opcode)
return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
}
// Some mask scalar intrinsics rely on checking if only one bit is set
// and implement it in C code like this:
// A[0] = (U & 1) ? A[0] : W[0];
// This creates some redundant instructions that break pattern matching.
// fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
SDValue AndNode = Cond.getOperand(0);
if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
isNullConstant(Cond.getOperand(1)) &&
isOneConstant(AndNode.getOperand(1))) {
// LHS and RHS are swapped because the setcc outputs 1 when the AND
// resulted in 0, and vice versa.
AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
}
}
// v16i8 (select v16i1, v16i8, v16i8) does not have a proper
// lowering on KNL. In this case we convert it to
// v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
// The same applies to all vectors of i8 and i16 without BWI.
// Make sure we extend these even before type legalization gets a chance to
// split wide vectors.
// As of SKX these selects have a proper lowering.
if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
CondVT.getVectorElementType() == MVT::i1 &&
VT.getVectorNumElements() > 4 &&
(VT.getVectorElementType() == MVT::i8 ||
VT.getVectorElementType() == MVT::i16)) {
Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
}
if (SDValue V = combineSelectOfTwoConstants(N, DAG))
return V;
// Canonicalize max and min:
// (x > y) ? x : y -> (x >= y) ? x : y
// (x < y) ? x : y -> (x <= y) ? x : y
// This allows the use of COND_S / COND_NS (see TranslateX86CC), which
// eliminates the need for an extra compare against zero, e.g.:
// (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
// subl %esi, %edi
// testl %edi, %edi
// movl $0, %eax
// cmovgl %edi, %eax
// =>
// xorl %eax, %eax
// subl %esi, %edi
// cmovsl %eax, %edi
if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
DAG.isEqualTo(RHS, Cond.getOperand(1))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
switch (CC) {
default: break;
case ISD::SETLT:
case ISD::SETGT: {
ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
Cond.getOperand(0), Cond.getOperand(1), NewCC);
return DAG.getSelect(DL, VT, Cond, LHS, RHS);
}
}
}
// Early exit check
if (!TLI.isTypeLegal(VT))
return SDValue();
// Match VSELECTs into subs with unsigned saturation.
if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
// psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
((Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
(Subtarget.hasAVX() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
// Check if one of the arms of the VSELECT is a zero vector. If it's on the
// left side invert the predicate to simplify logic below.
SDValue Other;
if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
Other = RHS;
CC = ISD::getSetCCInverse(CC, true);
} else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
Other = LHS;
}
if (Other.getNode() && Other->getNumOperands() == 2 &&
DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
SDValue CondRHS = Cond->getOperand(1);
auto SUBUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::SUBUS, DL, Ops[0].getValueType(), Ops);
};
// Look for a general sub with unsigned saturation first.
// x >= y ? x-y : 0 --> subus x, y
// x > y ? x-y : 0 --> subus x, y
if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
SUBUSBuilder);
if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
if (isa<BuildVectorSDNode>(CondRHS)) {
// If the RHS is a constant we have to reverse the const
// canonicalization.
// x > C-1 ? x+-C : 0 --> subus x, C
auto MatchSUBUS = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
return Cond->getAPIntValue() == (-Op->getAPIntValue() - 1);
};
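// e.g. (illustration): for v16i8 with C == 10, the IR
// "x > 9 ? x - 10 : 0" reaches here as an ADD with splat -10 (0xF6)
// compared against 9 == -(-10) - 1, and becomes subus x, 10.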
if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchSUBUS)) {
OpRHS = DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT), OpRHS);
return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
SUBUSBuilder);
}
// Another special case: If C was a sign bit, the sub has been
// canonicalized into a xor.
// FIXME: Would it be better to use computeKnownBits to determine
// whether it's safe to decanonicalize the xor?
// x s< 0 ? x^C : 0 --> subus x, C
if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode())
if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
OpRHSConst->getAPIntValue().isSignMask()) {
OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
// Note that we have to rebuild the RHS constant here to ensure we
// don't rely on particular values of undef lanes.
return SplitOpsAndApply(DAG, Subtarget, DL, VT, { OpLHS, OpRHS },
SUBUSBuilder);
}
}
}
}
if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
return V;
if (SDValue V = combineVSelectToShrunkBlend(N, DAG, DCI, Subtarget))
return V;
// Custom action for SELECT MMX
if (VT == MVT::x86mmx) {
LHS = DAG.getBitcast(MVT::i64, LHS);
RHS = DAG.getBitcast(MVT::i64, RHS);
SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
return DAG.getBitcast(VT, newSelect);
}
return SDValue();
}
/// Combine:
/// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
/// to:
/// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
/// Note that this is only legal for some op/cc combinations.
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// This combine only operates on CMP-like nodes.
if (!(Cmp.getOpcode() == X86ISD::CMP ||
(Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
return SDValue();
// Can't replace the cmp if it has more uses than the one we're looking at.
// FIXME: We would like to be able to handle this, but would need to make sure
// all uses were updated.
if (!Cmp.hasOneUse())
return SDValue();
// This only applies to variations of the common case:
// (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
// (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
// (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
// (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
// Using the proper condcodes (see below), overflow is checked for.
// FIXME: We can generalize both constraints:
// - XOR/OR/AND (if they were made to survive AtomicExpand)
// - LHS != 1
// if the result is compared.
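// Worked example (illustration): "lock incl; js" can become
// "lock incl; jle" because x < 0 <=> x + 1 <= 0 for every x except
// INT_MAX, and in that overflowing case the add sets OF = SF = 1, so
// COND_LE (ZF or SF != OF) is still false, matching x < 0.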
SDValue CmpLHS = Cmp.getOperand(0);
SDValue CmpRHS = Cmp.getOperand(1);
if (!CmpLHS.hasOneUse())
return SDValue();
unsigned Opc = CmpLHS.getOpcode();
if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
return SDValue();
SDValue OpRHS = CmpLHS.getOperand(2);
auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
if (!OpRHSC)
return SDValue();
APInt Addend = OpRHSC->getAPIntValue();
if (Opc == ISD::ATOMIC_LOAD_SUB)
Addend = -Addend;
auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
if (!CmpRHSC)
return SDValue();
APInt Comparison = CmpRHSC->getAPIntValue();
// If the addend is the negation of the comparison value, then we can do
// a full comparison by emitting the atomic arithmetic as a locked sub.
if (Comparison == -Addend) {
// The CC is fine, but we need to rewrite the LHS of the comparison as an
// atomic sub.
auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
auto AtomicSub = DAG.getAtomic(
ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
/*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
/*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
AN->getMemOperand());
// If the comparison uses the CF flag we can't use INC/DEC instructions.
bool NeedCF = false;
switch (CC) {
default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
NeedCF = true;
break;
}
auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget, !NeedCF);
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
DAG.getUNDEF(CmpLHS.getValueType()));
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
return LockOp;
}
// We can handle comparisons with zero in a number of cases by manipulating
// the CC used.
if (!Comparison.isNullValue())
return SDValue();
if (CC == X86::COND_S && Addend == 1)
CC = X86::COND_LE;
else if (CC == X86::COND_NS && Addend == 1)
CC = X86::COND_G;
else if (CC == X86::COND_G && Addend == -1)
CC = X86::COND_GE;
else if (CC == X86::COND_LE && Addend == -1)
CC = X86::COND_L;
else
return SDValue();
SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
DAG.getUNDEF(CmpLHS.getValueType()));
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
return LockOp;
}
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
// This combine only operates on CMP-like nodes.
if (!(Cmp.getOpcode() == X86ISD::CMP ||
(Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
return SDValue();
// Quit if not used as a boolean value.
if (CC != X86::COND_E && CC != X86::COND_NE)
return SDValue();
// Check CMP operands. One of them should be 0 or 1 and the other should be
// a SETCC or extended from it.
SDValue Op1 = Cmp.getOperand(0);
SDValue Op2 = Cmp.getOperand(1);
SDValue SetCC;
const ConstantSDNode* C = nullptr;
bool needOppositeCond = (CC == X86::COND_E);
bool checkAgainstTrue = false; // Is it a comparison against 1?
if ((C = dyn_cast<ConstantSDNode>(Op1)))
SetCC = Op2;
else if ((C = dyn_cast<ConstantSDNode>(Op2)))
SetCC = Op1;
else // Quit if neither operand is a constant.
return SDValue();
if (C->getZExtValue() == 1) {
needOppositeCond = !needOppositeCond;
checkAgainstTrue = true;
} else if (C->getZExtValue() != 0)
// Quit if the constant is neither 0 nor 1.
return SDValue();
bool truncatedToBoolWithAnd = false;
// Skip (zext $x), (trunc $x), or (and $x, 1) node.
while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
SetCC.getOpcode() == ISD::TRUNCATE ||
SetCC.getOpcode() == ISD::AND) {
if (SetCC.getOpcode() == ISD::AND) {
int OpIdx = -1;
if (isOneConstant(SetCC.getOperand(0)))
OpIdx = 1;
if (isOneConstant(SetCC.getOperand(1)))
OpIdx = 0;
if (OpIdx < 0)
break;
SetCC = SetCC.getOperand(OpIdx);
truncatedToBoolWithAnd = true;
} else
SetCC = SetCC.getOperand(0);
}
switch (SetCC.getOpcode()) {
case X86ISD::SETCC_CARRY:
// Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
// simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
// i.e. it's a comparison against true but the result of SETCC_CARRY is not
// truncated to i1 using 'and'.
if (checkAgainstTrue && !truncatedToBoolWithAnd)
break;
assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
"Invalid use of SETCC_CARRY!");
LLVM_FALLTHROUGH;
case X86ISD::SETCC:
// Set the condition code or opposite one if necessary.
CC = X86::CondCode(SetCC.getConstantOperandVal(0));
if (needOppositeCond)
CC = X86::GetOppositeBranchCondition(CC);
return SetCC.getOperand(1);
case X86ISD::CMOV: {
// Check whether the false/true values are canonical, i.e. 0 or 1.
ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
// Quit if true value is not a constant.
if (!TVal)
return SDValue();
// Quit if false value is not a constant.
if (!FVal) {
SDValue Op = SetCC.getOperand(0);
// Skip 'zext' or 'trunc' node.
if (Op.getOpcode() == ISD::ZERO_EXTEND ||
Op.getOpcode() == ISD::TRUNCATE)
Op = Op.getOperand(0);
// A special case for rdrand/rdseed, where 0 is set if false cond is
// found.
if ((Op.getOpcode() != X86ISD::RDRAND &&
Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
return SDValue();
}
// Quit if false value is not the constant 0 or 1.
bool FValIsFalse = true;
if (FVal && FVal->getZExtValue() != 0) {
if (FVal->getZExtValue() != 1)
return SDValue();
// If FVal is 1, opposite cond is needed.
needOppositeCond = !needOppositeCond;
FValIsFalse = false;
}
// Quit if TVal is not the constant opposite of FVal.
if (FValIsFalse && TVal->getZExtValue() != 1)
return SDValue();
if (!FValIsFalse && TVal->getZExtValue() != 0)
return SDValue();
CC = X86::CondCode(SetCC.getConstantOperandVal(2));
if (needOppositeCond)
CC = X86::GetOppositeBranchCondition(CC);
return SetCC.getOperand(3);
}
}
return SDValue();
}
/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
/// Match:
/// (X86or (X86setcc) (X86setcc))
/// (X86cmp (and (X86setcc) (X86setcc)), 0)
static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
X86::CondCode &CC1, SDValue &Flags,
bool &isAnd) {
if (Cond->getOpcode() == X86ISD::CMP) {
if (!isNullConstant(Cond->getOperand(1)))
return false;
Cond = Cond->getOperand(0);
}
isAnd = false;
SDValue SetCC0, SetCC1;
switch (Cond->getOpcode()) {
default: return false;
case ISD::AND:
case X86ISD::AND:
isAnd = true;
LLVM_FALLTHROUGH;
case ISD::OR:
case X86ISD::OR:
SetCC0 = Cond->getOperand(0);
SetCC1 = Cond->getOperand(1);
break;
}
// Make sure we have SETCC nodes, using the same flags value.
if (SetCC0.getOpcode() != X86ISD::SETCC ||
SetCC1.getOpcode() != X86ISD::SETCC ||
SetCC0->getOperand(1) != SetCC1->getOperand(1))
return false;
CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
Flags = SetCC0->getOperand(1);
return true;
}
// When legalizing carry, we create carries via add X, -1.
// If that comes from an actual carry, via setcc, we use the
// carry directly.
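// e.g. (illustration): (X86add (zext (X86setcc COND_B, Flags)), -1) sets CF
// exactly when the setcc produced 1, so a COND_B user of the add's flags
// can read Flags directly.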
static SDValue combineCarryThroughADD(SDValue EFLAGS) {
if (EFLAGS.getOpcode() == X86ISD::ADD) {
if (isAllOnesConstant(EFLAGS.getOperand(1))) {
SDValue Carry = EFLAGS.getOperand(0);
while (Carry.getOpcode() == ISD::TRUNCATE ||
Carry.getOpcode() == ISD::ZERO_EXTEND ||
Carry.getOpcode() == ISD::SIGN_EXTEND ||
Carry.getOpcode() == ISD::ANY_EXTEND ||
(Carry.getOpcode() == ISD::AND &&
isOneConstant(Carry.getOperand(1))))
Carry = Carry.getOperand(0);
if (Carry.getOpcode() == X86ISD::SETCC ||
Carry.getOpcode() == X86ISD::SETCC_CARRY) {
if (Carry.getConstantOperandVal(0) == X86::COND_B)
return Carry.getOperand(1);
}
}
}
return SDValue();
}
/// Optimize an EFLAGS definition used according to the condition code \p CC
/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
/// uses of chain values.
static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (CC == X86::COND_B)
if (SDValue Flags = combineCarryThroughADD(EFLAGS))
return Flags;
if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
return R;
return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
}
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue FalseOp = N->getOperand(0);
SDValue TrueOp = N->getOperand(1);
X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
SDValue Cond = N->getOperand(3);
// Try to simplify the EFLAGS and condition code operands.
// We can't always do this as FCMOV only supports a subset of X86 condition codes.
if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
Flags};
return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
}
}
// If this is a select between two integer constants, try to do some
// optimizations. Note that the operands are ordered the opposite of SELECT
// operands.
if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
// Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
// larger than FalseC (the false value).
if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
CC = X86::GetOppositeBranchCondition(CC);
std::swap(TrueC, FalseC);
std::swap(TrueOp, FalseOp);
}
// Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
// This is efficient for any integer data type (including i8/i16) and
// shift amount.
if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
Cond = getSETCC(CC, Cond, DL, DAG);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
unsigned ShAmt = TrueC->getAPIntValue().logBase2();
Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
DAG.getConstant(ShAmt, DL, MVT::i8));
return Cond;
}
// Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
// for any integer data type, including i8/i16.
if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
Cond = getSETCC(CC, Cond, DL, DAG);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
FalseC->getValueType(0), Cond);
Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
SDValue(FalseC, 0));
return Cond;
}
// Optimize cases that will turn into an LEA instruction. This requires
// an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
bool isFastMultiplier = false;
if (Diff < 10) {
switch ((unsigned char)Diff) {
default: break;
case 1: // result = add base, cond
case 2: // result = lea base( , cond*2)
case 3: // result = lea base(cond, cond*2)
case 4: // result = lea base( , cond*4)
case 5: // result = lea base(cond, cond*4)
case 8: // result = lea base( , cond*8)
case 9: // result = lea base(cond, cond*8)
isFastMultiplier = true;
break;
}
}
if (isFastMultiplier) {
APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
Cond = getSETCC(CC, Cond, DL, DAG);
// Zero extend the condition if needed.
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
Cond);
// Scale the condition by the difference.
if (Diff != 1)
Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
DAG.getConstant(Diff, DL, Cond.getValueType()));
// Add the base if non-zero.
if (FalseC->getAPIntValue() != 0)
Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
SDValue(FalseC, 0));
return Cond;
}
}
}
}
// Handle these cases:
// (select (x != c), e, c) -> (select (x != c), e, x),
// (select (x == c), c, e) -> (select (x == c), x, e)
// where c is an integer constant, and the "select" is the combination
// of CMOV and CMP.
//
// The rationale for this change is that the conditional-move from a constant
// needs two instructions, however, conditional-move from a register needs
// only one instruction.
//
// CAVEAT: By replacing a constant with a symbolic value, it may obscure
// some instruction-combining opportunities. This opt needs to be
// postponed as late as possible.
//
if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
// The DCI.xxxx conditions are provided to postpone the optimization as
// late as possible.
ConstantSDNode *CmpAgainst = nullptr;
if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
(CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
!isa<ConstantSDNode>(Cond.getOperand(0))) {
if (CC == X86::COND_NE &&
CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
CC = X86::GetOppositeBranchCondition(CC);
std::swap(TrueOp, FalseOp);
}
if (CC == X86::COND_E &&
CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
SDValue Ops[] = { FalseOp, Cond.getOperand(0),
DAG.getConstant(CC, DL, MVT::i8), Cond };
return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
}
}
}
// Fold and/or of setcc's to double CMOV:
// (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
// (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
//
// This combine lets us generate:
// cmovcc1 (jcc1 if we don't have CMOV)
// cmovcc2 (same)
// instead of:
// setcc1
// setcc2
// and/or
// cmovne (jne if we don't have CMOV)
// When we can't use the CMOV instruction, it might increase branch
// mispredicts.
// When we can use CMOV, or when there is no mispredict, this improves
// throughput and reduces register pressure.
//
if (CC == X86::COND_NE) {
SDValue Flags;
X86::CondCode CC0, CC1;
bool isAndSetCC;
if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
if (isAndSetCC) {
std::swap(FalseOp, TrueOp);
CC0 = X86::GetOppositeBranchCondition(CC0);
CC1 = X86::GetOppositeBranchCondition(CC1);
}
SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
Flags};
SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
return CMOV;
}
}
// Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
// (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
// Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
// (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
if ((CC == X86::COND_NE || CC == X86::COND_E) &&
Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
SDValue Add = TrueOp;
SDValue Const = FalseOp;
// Canonicalize the condition code for easier matching and output.
if (CC == X86::COND_E) {
std::swap(Add, Const);
CC = X86::COND_NE;
}
// Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
(Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
EVT VT = N->getValueType(0);
// This should constant fold.
SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
DAG.getConstant(CC, DL, MVT::i8), Cond);
return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
}
}
return SDValue();
}
/// Different mul shrinking modes.
enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
EVT VT = N->getOperand(0).getValueType();
if (VT.getScalarSizeInBits() != 32)
return false;
assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
unsigned SignBits[2] = {1, 1};
bool IsPositive[2] = {false, false};
for (unsigned i = 0; i < 2; i++) {
SDValue Opd = N->getOperand(i);
// DAG.ComputeNumSignBits returns 1 for ISD::ANY_EXTEND, so we need to
// compute sign bits for it separately.
if (Opd.getOpcode() == ISD::ANY_EXTEND) {
// For anyextend, it is safe to assume an appropriate number of leading
// sign/zero bits.
if (Opd.getOperand(0).getValueType().getVectorElementType() == MVT::i8)
SignBits[i] = 25;
else if (Opd.getOperand(0).getValueType().getVectorElementType() ==
MVT::i16)
SignBits[i] = 17;
else
return false;
IsPositive[i] = true;
} else if (Opd.getOpcode() == ISD::BUILD_VECTOR) {
// All the operands of BUILD_VECTOR need to be integer constants.
// Find the smallest value range which all the operands belong to.
SignBits[i] = 32;
IsPositive[i] = true;
for (const SDValue &SubOp : Opd.getNode()->op_values()) {
if (SubOp.isUndef())
continue;
auto *CN = dyn_cast<ConstantSDNode>(SubOp);
if (!CN)
return false;
APInt IntVal = CN->getAPIntValue();
if (IntVal.isNegative())
IsPositive[i] = false;
SignBits[i] = std::min(SignBits[i], IntVal.getNumSignBits());
}
} else {
SignBits[i] = DAG.ComputeNumSignBits(Opd);
if (Opd.getOpcode() == ISD::ZERO_EXTEND)
IsPositive[i] = true;
}
}
bool AllPositive = IsPositive[0] && IsPositive[1];
unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
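// A 32-bit value with at least N sign bits fits in (33 - N) signed bits, so
// 25+ sign bits means it fits in i8 and 17+ means it fits in i16. The
// unsigned modes allow one fewer sign bit but require non-negative operands.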
// When ranges are from -128 ~ 127, use MULS8 mode.
if (MinSignBits >= 25)
Mode = MULS8;
// When ranges are from 0 ~ 255, use MULU8 mode.
else if (AllPositive && MinSignBits >= 24)
Mode = MULU8;
// When ranges are from -32768 ~ 32767, use MULS16 mode.
else if (MinSignBits >= 17)
Mode = MULS16;
// When ranges are from 0 ~ 65535, use MULU16 mode.
else if (AllPositive && MinSignBits >= 16)
Mode = MULU16;
else
return false;
return true;
}
/// When the operands of vector mul are extended from smaller size values,
/// like i8 and i16, the type of mul may be shrunk to generate more
/// efficient code. Two typical patterns are handled:
/// Pattern1:
/// %2 = sext/zext <N x i8> %1 to <N x i32>
/// %4 = sext/zext <N x i8> %3 to <N x i32>
/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
/// %5 = mul <N x i32> %2, %4
///
/// Pattern2:
/// %2 = zext/sext <N x i16> %1 to <N x i32>
/// %4 = zext/sext <N x i16> %3 to <N x i32>
/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
/// %5 = mul <N x i32> %2, %4
///
/// There are four mul shrinking modes:
/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
/// generate pmullw+sext32 for it (MULS8 mode).
/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
/// generate pmullw+zext32 for it (MULU8 mode).
/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
/// generate pmullw+pmulhw for it (MULS16 mode).
/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
/// generate pmullw+pmulhuw for it (MULU16 mode).
static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Check for legality.
// pmullw/pmulhw require SSE2, so bail out on SSE1-only targets.
if (!Subtarget.hasSSE2())
return SDValue();
// Check for profitability.
// pmulld is available since SSE4.1. It is better to use pmulld
// instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
// the expansion.
bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize();
if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
return SDValue();
ShrinkMode Mode;
if (!canReduceVMulWidth(N, DAG, Mode))
return SDValue();
SDLoc DL(N);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getOperand(0).getValueType();
unsigned NumElts = VT.getVectorNumElements();
if ((NumElts % 2) != 0)
return SDValue();
unsigned RegSize = 128;
MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16);
EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
// Shrink the operands of mul.
SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
if (NumElts >= OpsVT.getVectorNumElements()) {
// Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
// lower part is needed.
SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
if (Mode == MULU8 || Mode == MULS8) {
return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
DL, VT, MulLo);
} else {
MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
// Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
// the higher part is also needed.
SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
ReducedVT, NewN0, NewN1);
// Repack the lower part and higher part result of mul into a wider
// result.
// Generate shuffle functioning as punpcklwd.
SmallVector<int, 16> ShuffleMask(NumElts);
for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
ShuffleMask[2 * i] = i;
ShuffleMask[2 * i + 1] = i + NumElts;
}
SDValue ResLo =
DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
ResLo = DAG.getBitcast(ResVT, ResLo);
// Generate shuffle functioning as punpckhwd.
for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
ShuffleMask[2 * i] = i + NumElts / 2;
ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
}
SDValue ResHi =
DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
ResHi = DAG.getBitcast(ResVT, ResHi);
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
}
} else {
// When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want
// to legalize the mul explicitly because implicit legalization for type
// <4 x i16> to <4 x i32> sometimes involves unnecessary unpack
// instructions which will not exist when we explicitly legalize it by
// extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with
// <4 x i16> undef).
//
// Legalize the operands of mul.
// FIXME: We may be able to handle non-concatenated vectors by insertion.
unsigned ReducedSizeInBits = ReducedVT.getSizeInBits();
if ((RegSize % ReducedSizeInBits) != 0)
return SDValue();
SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits,
DAG.getUNDEF(ReducedVT));
Ops[0] = NewN0;
NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
Ops[0] = NewN1;
NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops);
if (Mode == MULU8 || Mode == MULS8) {
// Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower
// part is needed.
SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
// Convert the type of the mul result to VT.
MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG
: ISD::SIGN_EXTEND_VECTOR_INREG,
DL, ResVT, Mul);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
DAG.getIntPtrConstant(0, DL));
} else {
// Generate the lower and higher parts of mul: pmullw and pmulhw/pmulhuw.
// For MULU16/MULS16, both parts are needed.
SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1);
SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
OpsVT, NewN0, NewN1);
// Repack the lower part and higher part result of mul into a wider
// result. Make sure the type of mul result is VT.
MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
SDValue Res = getUnpackl(DAG, DL, OpsVT, MulLo, MulHi);
Res = DAG.getBitcast(ResVT, Res);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
DAG.getIntPtrConstant(0, DL));
}
}
}
static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
EVT VT, const SDLoc &DL) {
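// Helper to build ((X * Mult) << Shift) +/- X using MUL_IMM, SHL and ADD/SUB.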
auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(Mult, DL, VT));
Result = DAG.getNode(ISD::SHL, DL, VT, Result,
DAG.getConstant(Shift, DL, MVT::i8));
Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
N->getOperand(0));
return Result;
};
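// Helper to build ((X * Mul1) * Mul2) +/- X; each MUL_IMM by 3, 5 or 9 can
// typically be matched as an LEA.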
auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(Mul1, DL, VT));
Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
DAG.getConstant(Mul2, DL, VT));
Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
N->getOperand(0));
return Result;
};
switch (MulAmt) {
default:
break;
case 11:
// mul x, 11 => add ((shl (mul x, 5), 1), x)
return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
case 21:
// mul x, 21 => add ((shl (mul x, 5), 2), x)
return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
case 41:
// mul x, 41 => add ((shl (mul x, 5), 3), x)
return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
case 22:
// mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
case 19:
// mul x, 19 => add ((shl (mul x, 9), 1), x)
return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
case 37:
// mul x, 37 => add ((shl (mul x, 9), 2), x)
return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
case 73:
// mul x, 73 => add ((shl (mul x, 9), 3), x)
return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
case 13:
// mul x, 13 => add ((shl (mul x, 3), 2), x)
return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
case 23:
// mul x, 23 => sub ((shl (mul x, 3), 3), x)
return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
case 26:
// mul x, 26 => add ((mul (mul x, 5), 5), x)
return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
case 28:
// mul x, 28 => add ((mul (mul x, 9), 3), x)
return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
case 29:
// mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
}
// Another trick. If this is a power of 2 + 2/4/8, we can use a shift followed
// by a single LEA.
// First check if this is a sum of two powers of 2 because that's easy. Then
// count the trailing zeros to find the smaller power of 2.
// TODO: We can do this even without LEA at a cost of two shifts and an add.
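// For example, mul x, 68 (= 64 + 4) becomes (x << 6) + (x << 2), where the
// scale-by-4 term can fold into LEA addressing.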
if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
unsigned ScaleShift = countTrailingZeros(MulAmt);
if (ScaleShift >= 1 && ScaleShift < 4) {
unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(ShiftAmt, DL, MVT::i8));
SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(ScaleShift, DL, MVT::i8));
return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
}
}
return SDValue();
}
// If the upper 17 bits of each element are zero then we can use PMADDWD,
// which is always at least as quick as PMULLD, except on KNL.
static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
if (Subtarget.getProcFamily() == X86Subtarget::IntelKNL)
return SDValue();
EVT VT = N->getValueType(0);
// Only support vXi32 vectors.
if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
return SDValue();
// Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
if (!DAG.getTargetLoweringInfo().isTypeLegal(WVT))
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
APInt Mask17 = APInt::getHighBitsSet(32, 17);
if (!DAG.MaskedValueIsZero(N1, Mask17) ||
!DAG.MaskedValueIsZero(N0, Mask17))
return SDValue();
// Use SplitOpsAndApply to handle AVX splitting.
auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
return DAG.getNode(X86ISD::VPMADDWD, DL, VT, Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
{ DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
PMADDWDBuilder);
}
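// Try to lower a vXi64 multiply to PMULDQ/PMULUDQ, which multiply the low 32
// bits of each 64-bit element and are typically much cheaper than a general
// vXi64 multiply.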
static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
EVT VT = N->getValueType(0);
// Only support vXi64 vectors.
if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// PMULDQ returns the 64-bit result of the signed multiplication of the lower
// 32 bits. We can lower with this if the sign bits stretch that far.
if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
DAG.ComputeNumSignBits(N1) > 32) {
auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
PMULDQBuilder, /*CheckBWI*/false);
}
// If the upper bits are zero we can use a single pmuludq.
APInt Mask = APInt::getHighBitsSet(64, 32);
if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
PMULUDQBuilder, /*CheckBWI*/false);
}
return SDValue();
}
/// Optimize a single multiply with constant into two operations in order to
/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
return V;
if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
return V;
if (DCI.isBeforeLegalize() && VT.isVector())
return reduceVMULWidth(N, DAG, Subtarget);
if (!MulConstantOptimization)
return SDValue();
// An imul is usually smaller than the alternative sequence.
if (DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
if (VT != MVT::i64 && VT != MVT::i32)
return SDValue();
ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!C)
return SDValue();
if (isPowerOf2_64(C->getZExtValue()))
return SDValue();
int64_t SignMulAmt = C->getSExtValue();
assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
SDLoc DL(N);
if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(AbsMulAmt, DL, VT));
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
NewMul);
return NewMul;
}
uint64_t MulAmt1 = 0;
uint64_t MulAmt2 = 0;
if ((AbsMulAmt % 9) == 0) {
MulAmt1 = 9;
MulAmt2 = AbsMulAmt / 9;
} else if ((AbsMulAmt % 5) == 0) {
MulAmt1 = 5;
MulAmt2 = AbsMulAmt / 5;
} else if ((AbsMulAmt % 3) == 0) {
MulAmt1 = 3;
MulAmt2 = AbsMulAmt / 3;
}
SDValue NewMul;
// For negative multiply amounts, only allow MulAmt2 to be a power of 2.
if (MulAmt2 &&
(isPowerOf2_64(MulAmt2) ||
(SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
if (isPowerOf2_64(MulAmt2) &&
!(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
// If the second multiplier is a power of 2, issue it first. We want the
// multiply by 3, 5, or 9 to be folded into the addressing mode unless the
// lone use is an add.
std::swap(MulAmt1, MulAmt2);
if (isPowerOf2_64(MulAmt1))
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
else
NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
DAG.getConstant(MulAmt1, DL, VT));
if (isPowerOf2_64(MulAmt2))
NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
else
NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
DAG.getConstant(MulAmt2, DL, VT));
// Negate the result.
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
NewMul);
} else if (!Subtarget.slowLEA())
NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
if (!NewMul) {
assert(C->getZExtValue() != 0 &&
C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
"Both cases that could cause potential overflows should have "
"already been handled.");
if (isPowerOf2_64(AbsMulAmt - 1)) {
// (mul x, 2^N + 1) => (add (shl x, N), x)
NewMul = DAG.getNode(
ISD::ADD, DL, VT, N->getOperand(0),
DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
MVT::i8)));
// To negate, subtract the number from zero
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(0, DL, VT), NewMul);
} else if (isPowerOf2_64(AbsMulAmt + 1)) {
// (mul x, 2^N - 1) => (sub (shl x, N), x)
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt + 1),
DL, MVT::i8));
// To negate, reverse the operands of the subtract.
if (SignMulAmt < 0)
NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
else
NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
} else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
// (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt - 2),
DL, MVT::i8));
NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
} else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
// (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
DAG.getConstant(Log2_64(AbsMulAmt + 2),
DL, MVT::i8));
NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
}
}
return NewMul;
}
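// Combine SHL: fold shifts of setcc_c-derived masks into the mask itself, and
// lower a splat vector shift-by-one to an ADD.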
static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
EVT VT = N0.getValueType();
// fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
// since the result of setcc_c is all zeros or all ones.
if (VT.isInteger() && !VT.isVector() &&
N1C && N0.getOpcode() == ISD::AND &&
N0.getOperand(1).getOpcode() == ISD::Constant) {
SDValue N00 = N0.getOperand(0);
APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
Mask <<= N1C->getAPIntValue();
bool MaskOK = false;
// We can handle cases concerning bit-widening nodes containing setcc_c if
// we carefully interrogate the mask to make sure the transform is
// semantics-preserving.
// The transform is not safe if the result of C1 << C2 exceeds the bitwidth
// of the underlying setcc_c operation if the setcc_c was zero extended.
// Consider the following example:
// zext(setcc_c) -> i32 0x0000FFFF
// c1 -> i32 0x0000FFFF
// c2 -> i32 0x00000001
// (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
// (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
MaskOK = true;
} else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
MaskOK = true;
} else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
N00.getOpcode() == ISD::ANY_EXTEND) &&
N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
}
if (MaskOK && Mask != 0) {
SDLoc DL(N);
return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
}
}
// Hardware support for vector shifts is sparse, which makes us scalarize the
// vector operations in many cases. Also, on Sandy Bridge ADD is faster than
// shl.
// (shl V, 1) -> add V,V
if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
assert(N0.getValueType().isVector() && "Invalid vector shift type");
// We shift all of the values by one. In many cases we do not have
// hardware support for this operation. This is better expressed as an ADD
// of two values.
if (N1SplatC->getAPIntValue() == 1)
return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
}
return SDValue();
}
static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
unsigned Size = VT.getSizeInBits();
// fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
// into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
// into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
// depending on sign of (SarConst - [56,48,32,24,16])
// sexts in X86 are MOVs. The MOVs have the same code size
// as the above SHIFTs (only SHIFT by 1 has lower code size).
// However the MOVs have 2 advantages over a SHIFT:
// 1. MOVs can write to a register that differs from the source
// 2. MOVs accept memory operands
if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
N0.getOperand(1).getOpcode() != ISD::Constant)
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
EVT CVT = N1.getValueType();
if (SarConst.isNegative())
return SDValue();
for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
unsigned ShiftSize = SVT.getSizeInBits();
// Skip types without a corresponding sext/zext and ShlConst values
// that are not one of [56,48,32,24,16].
if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
continue;
SDLoc DL(N);
SDValue NN =
DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
SarConst = SarConst - (Size - ShiftSize);
if (SarConst == 0)
return NN;
else if (SarConst.isNegative())
return DAG.getNode(ISD::SHL, DL, VT, NN,
DAG.getConstant(-SarConst, DL, CVT));
else
return DAG.getNode(ISD::SRA, DL, VT, NN,
DAG.getConstant(SarConst, DL, CVT));
}
return SDValue();
}
static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N0.getValueType();
// Only do this on the last DAG combine as it can interfere with other
// combines.
if (!DCI.isAfterLegalizeDAG())
return SDValue();
// Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
// TODO: This is a generic DAG combine that became an x86-only combine to
// avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
// and-not ('andn').
if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
return SDValue();
auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!ShiftC || !AndC)
return SDValue();
// If we can shrink the constant mask below 8-bits or 32-bits, then this
// transform should reduce code size. It may also enable secondary transforms
// from improved known-bits analysis or instruction selection.
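// For example, srl (and X, 0x7F0), 4 becomes and (srl X, 4), 0x7F, whose new
// mask fits in a sign-extended 8-bit immediate.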
APInt MaskVal = AndC->getAPIntValue();
// If this can be matched by a zero extend, don't optimize.
if (MaskVal.isMask()) {
unsigned TO = MaskVal.countTrailingOnes();
if (TO >= 8 && isPowerOf2_32(TO))
return SDValue();
}
APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
unsigned OldMaskSize = MaskVal.getMinSignedBits();
unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
(OldMaskSize > 32 && NewMaskSize <= 32)) {
// srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
SDLoc DL(N);
SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
}
return SDValue();
}
static SDValue combineShift(SDNode* N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (N->getOpcode() == ISD::SHL)
if (SDValue V = combineShiftLeft(N, DAG))
return V;
if (N->getOpcode() == ISD::SRA)
if (SDValue V = combineShiftRightArithmetic(N, DAG))
return V;
if (N->getOpcode() == ISD::SRL)
if (SDValue V = combineShiftRightLogical(N, DAG, DCI))
return V;
return SDValue();
}
static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned Opcode = N->getOpcode();
assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
"Unexpected shift opcode");
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
unsigned DstBitsPerElt = VT.getScalarSizeInBits();
unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
"Unexpected PACKSS/PACKUS input type");
// Constant Folding.
APInt UndefElts0, UndefElts1;
SmallVector<APInt, 32> EltBits0, EltBits1;
if ((N0->isUndef() || N->isOnlyUserOf(N0.getNode())) &&
(N1->isUndef() || N->isOnlyUserOf(N1.getNode())) &&
getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
unsigned NumLanes = VT.getSizeInBits() / 128;
unsigned NumDstElts = VT.getVectorNumElements();
unsigned NumSrcElts = NumDstElts / 2;
unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
bool IsSigned = (X86ISD::PACKSS == Opcode);
APInt Undefs(NumDstElts, 0);
SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
if (UndefElts[SrcIdx]) {
Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
continue;
}
APInt &Val = EltBits[SrcIdx];
if (IsSigned) {
// PACKSS: Truncate signed value with signed saturation.
// Source values less than dst minint are saturated to minint.
// Source values greater than dst maxint are saturated to maxint.
if (Val.isSignedIntN(DstBitsPerElt))
Val = Val.trunc(DstBitsPerElt);
else if (Val.isNegative())
Val = APInt::getSignedMinValue(DstBitsPerElt);
else
Val = APInt::getSignedMaxValue(DstBitsPerElt);
} else {
// PACKUS: Truncate signed value with unsigned saturation.
// Source values less than zero are saturated to zero.
// Source values greater than dst maxuint are saturated to maxuint.
if (Val.isIntN(DstBitsPerElt))
Val = Val.trunc(DstBitsPerElt);
else if (Val.isNegative())
Val = APInt::getNullValue(DstBitsPerElt);
else
Val = APInt::getAllOnesValue(DstBitsPerElt);
}
Bits[Lane * NumDstEltsPerLane + Elt] = Val;
}
}
return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
}
// Attempt to combine as shuffle.
SDValue Op(N, 0);
if (SDValue Res =
combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, DAG, Subtarget))
return Res;
return SDValue();
}
static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned Opcode = N->getOpcode();
assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
X86ISD::VSRLI == Opcode) &&
"Unexpected shift opcode");
bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
unsigned NumBitsPerElt = VT.getScalarSizeInBits();
assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
"Unexpected value type");
// Out of range logical bit shifts are guaranteed to be zero.
// Out of range arithmetic bit shifts splat the sign bit.
APInt ShiftVal = cast<ConstantSDNode>(N1)->getAPIntValue();
if (ShiftVal.zextOrTrunc(8).uge(NumBitsPerElt)) {
if (LogicalShift)
return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(N));
else
ShiftVal = NumBitsPerElt - 1;
}
// Shift N0 by zero -> N0.
if (!ShiftVal)
return N0;
// Shift zero -> zero.
if (ISD::isBuildVectorAllZeros(N0.getNode()))
return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(N));
// fold (VSRLI (VSRAI X, Y), 31) -> (VSRLI X, 31).
// This VSRLI only looks at the sign bit, which is unmodified by VSRAI.
// TODO - support other sra opcodes as needed.
if (Opcode == X86ISD::VSRLI && (ShiftVal + 1) == NumBitsPerElt &&
N0.getOpcode() == X86ISD::VSRAI)
return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, N0.getOperand(0), N1);
// fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSHLI &&
N1 == N0.getOperand(1)) {
SDValue N00 = N0.getOperand(0);
unsigned NumSignBits = DAG.ComputeNumSignBits(N00);
if (ShiftVal.ult(NumSignBits))
return N00;
}
// We can decode 'whole byte' logical bit shifts as shuffles.
if (LogicalShift && (ShiftVal.getZExtValue() % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, DAG, Subtarget))
return Res;
}
// Constant Folding.
APInt UndefElts;
SmallVector<APInt, 32> EltBits;
if (N->isOnlyUserOf(N0.getNode()) &&
getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
assert(EltBits.size() == VT.getVectorNumElements() &&
"Unexpected shift value type");
unsigned ShiftImm = ShiftVal.getZExtValue();
for (APInt &Elt : EltBits) {
if (X86ISD::VSHLI == Opcode)
Elt <<= ShiftImm;
else if (X86ISD::VSRAI == Opcode)
Elt.ashrInPlace(ShiftImm);
else
Elt.lshrInPlace(ShiftImm);
}
return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
}
return SDValue();
}
static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
assert(
((N->getOpcode() == X86ISD::PINSRB && N->getValueType(0) == MVT::v16i8) ||
(N->getOpcode() == X86ISD::PINSRW &&
N->getValueType(0) == MVT::v8i16)) &&
"Unexpected vector insertion");
// Attempt to combine PINSRB/PINSRW patterns to a shuffle.
SDValue Op(N, 0);
if (SDValue Res =
combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, DAG, Subtarget))
return Res;
return SDValue();
}
/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
/// OR -> CMPNEQSS.
static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned opcode;
// SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
// we're requiring SSE2 for both.
if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDValue CMP0 = N0->getOperand(1);
SDValue CMP1 = N1->getOperand(1);
SDLoc DL(N);
// The SETCCs should both refer to the same CMP.
if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
return SDValue();
SDValue CMP00 = CMP0->getOperand(0);
SDValue CMP01 = CMP0->getOperand(1);
EVT VT = CMP00.getValueType();
if (VT == MVT::f32 || VT == MVT::f64) {
bool ExpectingFlags = false;
// Check for any users that want flags:
for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
!ExpectingFlags && UI != UE; ++UI)
switch (UI->getOpcode()) {
default:
case ISD::BR_CC:
case ISD::BRCOND:
case ISD::SELECT:
ExpectingFlags = true;
break;
case ISD::CopyToReg:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
break;
}
if (!ExpectingFlags) {
enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
X86::CondCode tmp = cc0;
cc0 = cc1;
cc1 = tmp;
}
if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
(cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
// FIXME: need symbolic constants for these magic numbers.
// See X86ATTInstPrinter.cpp:printSSECC().
unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
if (Subtarget.hasAVX512()) {
SDValue FSetCC =
DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
DAG.getConstant(x86cc, DL, MVT::i8));
// Need to fill with zeros to ensure the bitcast will produce zeroes
// for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
DAG.getConstant(0, DL, MVT::v16i1),
FSetCC, DAG.getIntPtrConstant(0, DL));
return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
N->getSimpleValueType(0));
}
SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
CMP00.getValueType(), CMP00, CMP01,
DAG.getConstant(x86cc, DL,
MVT::i8));
bool is64BitFP = (CMP00.getValueType() == MVT::f64);
MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
if (is64BitFP && !Subtarget.is64Bit()) {
// On a 32-bit target, we cannot bitcast the 64-bit float to a
// 64-bit integer, since that's not a legal type. Since
// OnesOrZeroesF is all ones or all zeroes, we don't need all the
// bits, but can do this little dance to extract the lowest 32 bits
// and work with those going forward.
SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
OnesOrZeroesF);
SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
Vector32, DAG.getIntPtrConstant(0, DL));
IntVT = MVT::i32;
}
SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
DAG.getConstant(1, DL, IntVT));
SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
ANDed);
return OneBitOfTruth;
}
}
}
}
return SDValue();
}
// Try to match (and (xor X, -1), Y) logic pattern for (andnp X, Y) combines.
static bool matchANDXORWithAllOnesAsANDNP(SDNode *N, SDValue &X, SDValue &Y) {
if (N->getOpcode() != ISD::AND)
return false;
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
if (N0.getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) {
X = N0.getOperand(0);
Y = N1;
return true;
}
if (N1.getOpcode() == ISD::XOR &&
ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) {
X = N1.getOperand(0);
Y = N0;
return true;
}
return false;
}
/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == ISD::AND);
EVT VT = N->getValueType(0);
if (VT != MVT::v2i64 && VT != MVT::v4i64 && VT != MVT::v8i64)
return SDValue();
SDValue X, Y;
if (matchANDXORWithAllOnesAsANDNP(N, X, Y))
return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
return SDValue();
}
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
// Even with AVX-512 this is still useful for removing casts around logical
// operations on vXi1 mask types.
static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
assert(VT.isVector() && "Expected vector type");
assert((N->getOpcode() == ISD::ANY_EXTEND ||
N->getOpcode() == ISD::ZERO_EXTEND ||
N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
SDValue Narrow = N->getOperand(0);
EVT NarrowVT = Narrow.getValueType();
if (Narrow->getOpcode() != ISD::XOR &&
Narrow->getOpcode() != ISD::AND &&
Narrow->getOpcode() != ISD::OR)
return SDValue();
SDValue N0 = Narrow->getOperand(0);
SDValue N1 = Narrow->getOperand(1);
SDLoc DL(Narrow);
// The left side has to be a trunc.
if (N0.getOpcode() != ISD::TRUNCATE)
return SDValue();
// The type of the truncated inputs.
if (N0->getOperand(0).getValueType() != VT)
return SDValue();
// The right side has to be a 'trunc' or a constant vector.
bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getValueType() == VT;
if (!RHSTrunc &&
!ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
return SDValue();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
return SDValue();
// Set N0 and N1 to hold the inputs to the new wide operation.
N0 = N0->getOperand(0);
if (RHSTrunc)
N1 = N1->getOperand(0);
else
N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
// Generate the wide operation.
SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
unsigned Opcode = N->getOpcode();
switch (Opcode) {
default: llvm_unreachable("Unexpected opcode");
case ISD::ANY_EXTEND:
return Op;
case ISD::ZERO_EXTEND:
return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
case ISD::SIGN_EXTEND:
return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
Op, DAG.getValueType(NarrowVT));
}
}
/// If both input operands of a logic op are being cast from floating point
/// types, try to convert this into a floating point logic node to avoid
/// unnecessary moves from SSE to integer registers.
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
unsigned FPOpcode = ISD::DELETED_NODE;
if (N->getOpcode() == ISD::AND)
FPOpcode = X86ISD::FAND;
else if (N->getOpcode() == ISD::OR)
FPOpcode = X86ISD::FOR;
else if (N->getOpcode() == ISD::XOR)
FPOpcode = X86ISD::FXOR;
assert(FPOpcode != ISD::DELETED_NODE &&
"Unexpected input node for FP logic conversion");
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
SDLoc DL(N);
if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
((Subtarget.hasSSE1() && VT == MVT::i32) ||
(Subtarget.hasSSE2() && VT == MVT::i64))) {
SDValue N00 = N0.getOperand(0);
SDValue N10 = N1.getOperand(0);
EVT N00Type = N00.getValueType();
EVT N10Type = N10.getValueType();
if (N00Type.isFloatingPoint() && N10Type.isFloatingPoint()) {
SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
return DAG.getBitcast(VT, FPLogic);
}
}
return SDValue();
}
/// If this is a zero/all-bits result that is bitwise-anded with a low-bits
/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
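/// For example, if every v8i16 element of X is known to be all-ones or zero,
/// (and X, (build_vector 1,1,...,1)) can become (vsrli X, 15).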
static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
EVT VT0 = Op0.getValueType();
EVT VT1 = Op1.getValueType();
if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
return SDValue();
APInt SplatVal;
if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
!SplatVal.isMask())
return SDValue();
if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
return SDValue();
unsigned EltBitWidth = VT0.getScalarSizeInBits();
if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
return SDValue();
SDLoc DL(N);
unsigned ShiftVal = SplatVal.countTrailingOnes();
SDValue ShAmt = DAG.getConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
return DAG.getBitcast(N->getValueType(0), Shift);
}
// Get the index node from the lowered DAG of a GEP IR instruction with one
// indexing dimension.
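// The expected address pattern is (add (shl index, scale), base); the first
// operand of the shl is returned as the index.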
static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
if (Ld->isIndexed())
return SDValue();
SDValue Base = Ld->getBasePtr();
if (Base.getOpcode() != ISD::ADD)
return SDValue();
SDValue ShiftedIndex = Base.getOperand(0);
if (ShiftedIndex.getOpcode() != ISD::SHL)
return SDValue();
return ShiftedIndex.getOperand(0);
}
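// Return true if the subtarget supports BZHI for the given type: BMI2
// provides the 32-bit form; the 64-bit form additionally requires 64-bit mode.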
static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
switch (VT.getSizeInBits()) {
default: return false;
case 64: return Subtarget.is64Bit();
case 32: return true;
}
}
return false;
}
// This function recognizes cases where the X86 bzhi instruction can replace
// an 'and-load' sequence.
// When an integer value is loaded from an array of constants defined as
// follows:
//
// int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
//
// and a bitwise and is then applied to the result with another input, this is
// equivalent to performing bzhi (zero high bits) on the input, using the same
// index as the load.
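// As an illustration (hypothetical IR; the value names are invented):
//   %val = load i32, i32* %arrayidx   ; loads array[%idx] from the array above
//   %res = and i32 %val, %input
// zeroes %input's bits from position %idx upward, so it can be done by a
// single bzhi with %idx as the count operand.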
static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = Node->getSimpleValueType(0);
SDLoc dl(Node);
// Check if subtarget has BZHI instruction for the node's type
if (!hasBZHI(Subtarget, VT))
return SDValue();
// Try matching the pattern for both operands.
for (unsigned i = 0; i < 2; i++) {
SDValue N = Node->getOperand(i);
LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
// Bail out if the operand is not a load instruction.
if (!Ld)
return SDValue();
const Value *MemOp = Ld->getMemOperand()->getValue();
if (!MemOp)
return SDValue();
if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
Constant *Init = GV->getInitializer();
Type *Ty = Init->getType();
if (!isa<ConstantDataArray>(Init) ||
!Ty->getArrayElementType()->isIntegerTy() ||
Ty->getArrayElementType()->getScalarSizeInBits() !=
VT.getSizeInBits() ||
Ty->getArrayNumElements() >
Ty->getArrayElementType()->getScalarSizeInBits())
continue;
// Check if the array's constant elements are suitable to our case.
uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
bool ConstantsMatch = true;
for (uint64_t j = 0; j < ArrayElementCount; j++) {
ConstantInt *Elem =
dyn_cast<ConstantInt>(Init->getAggregateElement(j));
if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
ConstantsMatch = false;
break;
}
}
if (!ConstantsMatch)
continue;
// Do the transformation (for a 32-bit type):
// (and (load arr[idx]), inp)
// -> (and (srl 0xFFFFFFFF, (sub 32, idx)), inp)
// which will then be matched as a single bzhi instruction.
SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
// Get the Node which indexes into the array.
SDValue Index = getIndexFromUnindexedLoad(Ld);
if (!Index)
return SDValue();
Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
}
}
}
}
return SDValue();
}
static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
// If this is SSE1 only convert to FAND to avoid scalarization.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
return DAG.getBitcast(
MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
}
// Use a 32-bit and+zext if upper bits known zero.
if (VT == MVT::i64 && Subtarget.is64Bit() &&
!isa<ConstantSDNode>(N->getOperand(1))) {
APInt HiMask = APInt::getHighBitsSet(64, 32);
if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
SDLoc dl(N);
SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
}
}
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
return R;
if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
return ShiftRight;
if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
return R;
// Attempt to recursively combine a bitmask AND with shuffles.
if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, DAG, Subtarget))
return Res;
}
// Attempt to combine a scalar bitmask AND with an extracted shuffle.
if ((VT.getScalarSizeInBits() % 8) == 0 &&
N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
SDValue BitMask = N->getOperand(1);
SDValue SrcVec = N->getOperand(0).getOperand(0);
EVT SrcVecVT = SrcVec.getValueType();
// Check that the constant bitmask masks whole bytes.
APInt UndefElts;
SmallVector<APInt, 64> EltBits;
if (VT == SrcVecVT.getScalarType() &&
N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
llvm::all_of(EltBits, [](APInt M) {
return M.isNullValue() || M.isAllOnesValue();
})) {
unsigned NumElts = SrcVecVT.getVectorNumElements();
unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
// Create a root shuffle mask from the byte mask and the extracted index.
SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
for (unsigned i = 0; i != Scale; ++i) {
if (UndefElts[i])
continue;
int VecIdx = Scale * Idx + i;
ShuffleMask[VecIdx] =
EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
}
if (SDValue Shuffle = combineX86ShufflesRecursively(
{SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 2,
/*HasVarMask*/ false, DAG, Subtarget))
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
N->getOperand(0).getOperand(1));
}
}
return SDValue();
}
// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
if (N->getOpcode() != ISD::OR)
return false;
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// Canonicalize AND to LHS.
if (N1.getOpcode() == ISD::AND)
std::swap(N0, N1);
// Attempt to match OR(AND(M,Y),ANDNP(M,X)).
if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
return false;
Mask = N1.getOperand(0);
X = N1.getOperand(1);
// Check to see if the mask appeared in both the AND and ANDNP.
if (N0.getOperand(0) == Mask)
Y = N0.getOperand(1);
else if (N0.getOperand(1) == Mask)
Y = N0.getOperand(0);
else
return false;
// TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
// ANDNP combine allows other combines to happen that prevent matching.
return true;
}
// Try to fold:
// (or (and (m, y), (pandn m, x)))
// into:
// (vselect m, x, y)
// As a special case, try to fold:
// (or (and (m, (sub 0, x)), (pandn m, x)))
// into:
// (sub (xor X, M), M)
static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
EVT VT = N->getValueType(0);
if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
(VT.is256BitVector() && Subtarget.hasInt256())))
return SDValue();
SDValue X, Y, Mask;
if (!matchLogicBlend(N, X, Y, Mask))
return SDValue();
// Validate that X, Y, and Mask are bitcasts, and see through them.
Mask = peekThroughBitcasts(Mask);
X = peekThroughBitcasts(X);
Y = peekThroughBitcasts(Y);
EVT MaskVT = Mask.getValueType();
unsigned EltBits = MaskVT.getScalarSizeInBits();
// TODO: Attempt to handle floating point cases as well?
if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
return SDValue();
SDLoc DL(N);
// Try to match:
// (or (and (M, (sub 0, X)), (pandn M, X)))
// which is a special case of vselect:
// (vselect M, (sub 0, X), X)
// Per:
// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
// We know that, if fNegate is 0 or 1:
// (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
//
// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
// ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
// ( M ? -X : X) == ((X ^ M ) + (M & 1))
// This lets us transform our vselect to:
// (add (xor X, M), (and M, 1))
// And further to:
// (sub (xor X, M), M)
if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT &&
DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT)) {
auto IsNegV = [](SDNode *N, SDValue V) {
return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
};
SDValue V;
if (IsNegV(Y.getNode(), X))
V = X;
else if (IsNegV(X.getNode(), Y))
V = Y;
if (V) {
SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
SDValue SubOp2 = Mask;
// If the negate was on the false side of the select, then
// the operands of the SUB need to be swapped. PR 27251.
// This is because the pattern being matched above is
// (vselect M, (sub (0, X), X) -> (sub (xor X, M), M)
// but if the pattern matched was
// (vselect M, X, (sub (0, X))), that is really negation of the pattern
// above, -(vselect M, (sub 0, X), X), and therefore the replacement
// pattern also needs to be a negation of the replacement pattern above.
// And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
// sub accomplishes the negation of the replacement pattern.
if (V == Y)
std::swap(SubOp1, SubOp2);
SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
return DAG.getBitcast(VT, Res);
}
}
// PBLENDVB is only available on SSE 4.1.
if (!Subtarget.hasSSE41())
return SDValue();
MVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
X = DAG.getBitcast(BlendVT, X);
Y = DAG.getBitcast(BlendVT, Y);
Mask = DAG.getBitcast(BlendVT, Mask);
Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
return DAG.getBitcast(VT, Mask);
}
// Helper function for combineOrCmpEqZeroToCtlzSrl
// Transforms:
// seteq(cmp x, 0)
// into:
// srl(ctlz x), log2(bitsize(x))
// Input pattern is checked by caller.
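// For example, for i32 x: ctlz(0) = 32 and ctlz of any non-zero value is at
// most 31, so srl(ctlz(x), 5) yields 1 exactly when x == 0.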
static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
SelectionDAG &DAG) {
SDValue Cmp = Op.getOperand(1);
EVT VT = Cmp.getOperand(0).getValueType();
unsigned Log2b = Log2_32(VT.getSizeInBits());
SDLoc dl(Op);
SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
// The result of the shift is true or false, and on X86, the 32-bit
// encoding of shr and lzcnt is more desirable.
SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
DAG.getConstant(Log2b, dl, MVT::i8));
return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
}
// Try to transform:
// zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
// into:
// srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
// Will also attempt to match more generic cases, eg:
// zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
// Only applies if the target supports the FastLZCNT feature.
static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
return SDValue();
auto isORCandidate = [](SDValue N) {
return (N->getOpcode() == ISD::OR && N->hasOneUse());
};
// Check that the zero extend is to 32 bits or more. The code generated by
// srl(ctlz) for 16-bit or smaller variants of the pattern would require extra
// instructions to clear the upper bits.
if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
!isORCandidate(N->getOperand(0)))
return SDValue();
// Check the node matches: setcc(eq, cmp 0)
auto isSetCCCandidate = [](SDValue N) {
return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
N->getOperand(1).getOpcode() == X86ISD::CMP &&
isNullConstant(N->getOperand(1).getOperand(1)) &&
N->getOperand(1).getValueType().bitsGE(MVT::i32);
};
SDNode *OR = N->getOperand(0).getNode();
SDValue LHS = OR->getOperand(0);
SDValue RHS = OR->getOperand(1);
// Save nodes matching or(or, setcc(eq, cmp 0)).
SmallVector<SDNode *, 2> ORNodes;
while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
(isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
ORNodes.push_back(OR);
OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
LHS = OR->getOperand(0);
RHS = OR->getOperand(1);
}
// The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
!isORCandidate(SDValue(OR, 0)))
return SDValue();
// We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
// to or(srl(ctlz), srl(ctlz)).
// The dag combiner can then fold it into:
// srl(or(ctlz, ctlz)).
EVT VT = OR->getValueType(0);
SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
SDValue Ret, NewRHS;
if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
if (!Ret)
return SDValue();
// Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
while (ORNodes.size() > 0) {
OR = ORNodes.pop_back_val();
LHS = OR->getOperand(0);
RHS = OR->getOperand(1);
// Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
if (RHS->getOpcode() == ISD::OR)
std::swap(LHS, RHS);
EVT VT = OR->getValueType(0);
SDValue NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
if (!NewRHS)
return SDValue();
Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
}
if (Ret)
Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
return Ret;
}
static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// If this is SSE1 only convert to FOR to avoid scalarization.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
return DAG.getBitcast(MVT::v4i32,
DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
DAG.getBitcast(MVT::v4f32, N0),
DAG.getBitcast(MVT::v4f32, N1)));
}
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
return R;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
return R;
if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
return SDValue();
// fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
// SHLD/SHRD instructions have lower register pressure, but on some
// platforms they have higher latency than the equivalent
// series of shifts/or that would otherwise be generated.
// Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
// have higher latencies and we are not optimizing for size.
if (!OptForSize && Subtarget.isSHLDSlow())
return SDValue();
if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
std::swap(N0, N1);
if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
return SDValue();
if (!N0.hasOneUse() || !N1.hasOneUse())
return SDValue();
SDValue ShAmt0 = N0.getOperand(1);
if (ShAmt0.getValueType() != MVT::i8)
return SDValue();
SDValue ShAmt1 = N1.getOperand(1);
if (ShAmt1.getValueType() != MVT::i8)
return SDValue();
if (ShAmt0.getOpcode() == ISD::TRUNCATE)
ShAmt0 = ShAmt0.getOperand(0);
if (ShAmt1.getOpcode() == ISD::TRUNCATE)
ShAmt1 = ShAmt1.getOperand(0);
SDLoc DL(N);
unsigned Opc = X86ISD::SHLD;
SDValue Op0 = N0.getOperand(0);
SDValue Op1 = N1.getOperand(0);
if (ShAmt0.getOpcode() == ISD::SUB ||
ShAmt0.getOpcode() == ISD::XOR) {
Opc = X86ISD::SHRD;
std::swap(Op0, Op1);
std::swap(ShAmt0, ShAmt1);
}
// OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> SHLD( X, Y, C )
// OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> SHRD( X, Y, C )
// OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> SHLD( X, Y, C )
// OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> SHRD( X, Y, C )
unsigned Bits = VT.getSizeInBits();
if (ShAmt1.getOpcode() == ISD::SUB) {
SDValue Sum = ShAmt1.getOperand(0);
if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
ShAmt1Op1 = ShAmt1Op1.getOperand(0);
if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
return DAG.getNode(Opc, DL, VT,
Op0, Op1,
DAG.getNode(ISD::TRUNCATE, DL,
MVT::i8, ShAmt0));
}
} else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
return DAG.getNode(Opc, DL, VT,
N0.getOperand(0), N1.getOperand(0),
DAG.getNode(ISD::TRUNCATE, DL,
MVT::i8, ShAmt0));
} else if (ShAmt1.getOpcode() == ISD::XOR) {
SDValue Mask = ShAmt1.getOperand(1);
if (ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
unsigned InnerShift = (X86ISD::SHLD == Opc ? ISD::SRL : ISD::SHL);
SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
ShAmt1Op0 = ShAmt1Op0.getOperand(0);
if (MaskC->getSExtValue() == (Bits - 1) && ShAmt1Op0 == ShAmt0) {
if (Op1.getOpcode() == InnerShift &&
isa<ConstantSDNode>(Op1.getOperand(1)) &&
Op1.getConstantOperandVal(1) == 1) {
return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0),
DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
}
// Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
Op1.getOperand(0) == Op1.getOperand(1)) {
return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0),
DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0));
}
}
}
}
return SDValue();
}
/// Try to turn tests against the signbit in the form of:
/// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into:
/// SETGT(X, -1)
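/// For example, with X : i32 the input XOR(TRUNCATE(SRL(X, 31)), 1) computes
/// "the sign bit of X is clear", i.e. X >= 0, which is exactly SETGT(X, -1).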
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
// This is only worth doing if the output type is i8 or i1.
EVT ResultType = N->getValueType(0);
if (ResultType != MVT::i8 && ResultType != MVT::i1)
return SDValue();
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// We should be performing an xor against a truncated shift.
if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
return SDValue();
// Make sure we are performing an xor against one.
if (!isOneConstant(N1))
return SDValue();
// SetCC on x86 zero extends so only act on this if it's a logical shift.
SDValue Shift = N0.getOperand(0);
if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
return SDValue();
// Make sure we are truncating from one of i16, i32 or i64.
EVT ShiftTy = Shift.getValueType();
if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
return SDValue();
// Make sure the shift amount extracts the sign bit.
if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
Shift.getConstantOperandVal(1) != ShiftTy.getSizeInBits() - 1)
return SDValue();
// Create a greater-than comparison against -1.
// N.B. Using SETGE against 0 works, but we want a canonical-looking
// comparison; using SETGT matches the form TranslateX86CC handles.
SDLoc DL(N);
SDValue ShiftOp = Shift.getOperand(0);
EVT ShiftOpTy = ShiftOp.getValueType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(), ResultType);
SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
if (SetCCResultType != ResultType)
Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
return Cond;
}
/// Turn vector tests of the signbit in the form of:
/// xor (sra X, elt_size(X)-1), -1
/// into:
/// pcmpgt X, -1
///
/// This should be called before type legalization because the pattern may not
/// persist after that.
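/// For example, for v4i32 the input is xor (sra X, 31), -1: the shift smears
/// each element's sign bit, and the 'not' yields all-ones exactly for the
/// non-negative elements, which is what pcmpgt X, -1 produces directly.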
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
if (!VT.isSimple())
return SDValue();
switch (VT.getSimpleVT().SimpleTy) {
default: return SDValue();
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
}
// There must be an arithmetic shift right (SRA) before the xor, and the xor
// must be a 'not' operation.
SDValue Shift = N->getOperand(0);
SDValue Ones = N->getOperand(1);
if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
!ISD::isBuildVectorAllOnes(Ones.getNode()))
return SDValue();
// The shift should be smearing the sign bit across each vector element.
auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1));
if (!ShiftBV)
return SDValue();
EVT ShiftEltTy = Shift.getValueType().getVectorElementType();
auto *ShiftAmt = ShiftBV->getConstantSplatNode();
if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1)
return SDValue();
// Create a greater-than comparison against -1. We don't use the more obvious
// greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
}
/// Check if truncation with saturation form type \p SrcVT to \p DstVT
/// is valid for the given \p Subtarget.
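/// For example, on any AVX512 target v16i32 -> v16i8 qualifies (512-bit
/// source with 32-bit elements), while v8i16 -> v8i8 additionally requires
/// BWI for the 16-bit source elements and VLX for the 128-bit source vector.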
static bool isSATValidOnAVX512Subtarget(EVT SrcVT, EVT DstVT,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasAVX512())
return false;
// FIXME: Scalar types may be supported if we move them to a vector register.
if (!SrcVT.isVector())
return false;
EVT SrcElVT = SrcVT.getScalarType();
EVT DstElVT = DstVT.getScalarType();
if (DstElVT != MVT::i8 && DstElVT != MVT::i16 && DstElVT != MVT::i32)
return false;
if (SrcVT.is512BitVector() || Subtarget.hasVLX())
return SrcElVT.getSizeInBits() >= 32 || Subtarget.hasBWI();
return false;
}
/// Detect patterns of truncation with unsigned saturation:
///
/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
/// Return the source value x to be truncated or SDValue() if the pattern was
/// not matched.
///
/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
/// where C1 >= 0 and C2 is unsigned max of destination type.
///
/// (truncate (smax (smin (x, C2), C1)) to dest_type)
/// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
///
/// These two patterns are equivalent to:
/// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
/// So return the smax(x, C1) value to be truncated or SDValue() if the
/// pattern was not matched.
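/// For example, when truncating i32 to i8, (truncate (umin x, 255)) matches
/// pattern 1 and returns x, while (truncate (smin (smax x, 0), 255)) matches
/// pattern 2 and returns (smax x, 0).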
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
const SDLoc &DL) {
EVT InVT = In.getValueType();
// Saturation with truncation. We truncate from InVT to VT.
assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
"Unexpected types for truncate operation");
// Match min/max and return limit value as a parameter.
auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
if (V.getOpcode() == Opcode &&
ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
return V.getOperand(0);
return SDValue();
};
APInt C1, C2;
if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
// C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
// the element size of the destination type.
if (C2.isMask(VT.getScalarSizeInBits()))
return UMin;
if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
if (MatchMinMax(SMin, ISD::SMAX, C1))
if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
return SMin;
if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
C2.uge(C1)) {
return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
}
return SDValue();
}
/// Detect patterns of truncation with signed saturation:
/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
/// signed_max_of_dest_type)) to dest_type)
/// or:
/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
/// signed_min_of_dest_type)) to dest_type).
/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
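/// For example, when truncating i32 to i8, the signed form clamps to
/// (smin (smax x, -128), 127); with MatchPackUS the clamped range is
/// [0, 255] instead.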
static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
unsigned NumDstBits = VT.getScalarSizeInBits();
unsigned NumSrcBits = In.getScalarValueSizeInBits();
assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
auto MatchMinMax = [](SDValue V, unsigned Opcode,
const APInt &Limit) -> SDValue {
APInt C;
if (V.getOpcode() == Opcode &&
ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
return V.getOperand(0);
return SDValue();
};
APInt SignedMax, SignedMin;
if (MatchPackUS) {
SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
SignedMin = APInt(NumSrcBits, 0);
} else {
SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
}
if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
return SMax;
if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
return SMin;
return SDValue();
}
/// Detect a pattern of truncation with signed saturation.
/// The types should allow use of the VPMOVS* instructions on AVX512.
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
static SDValue detectAVX512SSatPattern(SDValue In, EVT VT,
const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (!TLI.isTypeLegal(In.getValueType()))
return SDValue();
if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
return SDValue();
return detectSSatPattern(In, VT);
}
/// Detect a pattern of truncation with saturation:
/// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
/// The types should allow use of the VPMOVUS* instructions on AVX512.
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
static SDValue detectAVX512USatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
const SDLoc &DL,
const X86Subtarget &Subtarget,
const TargetLowering &TLI) {
if (!TLI.isTypeLegal(In.getValueType()))
return SDValue();
if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget))
return SDValue();
return detectUSatPattern(In, VT, DAG, DL);
}
static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT SVT = VT.getScalarType();
EVT InVT = In.getValueType();
EVT InSVT = InVT.getScalarType();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (TLI.isTypeLegal(InVT) && TLI.isTypeLegal(VT) &&
isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) {
if (auto SSatVal = detectSSatPattern(In, VT))
return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
}
if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) &&
(SVT == MVT::i8 || SVT == MVT::i16) &&
(InSVT == MVT::i16 || InSVT == MVT::i32)) {
if (auto USatVal = detectSSatPattern(In, VT, true)) {
// vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
if (SVT == MVT::i8 && InSVT == MVT::i32) {
EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
VT.getVectorNumElements());
SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
DAG, Subtarget);
if (Mid)
return truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
Subtarget);
} else if (SVT == MVT::i8 || Subtarget.hasSSE41())
return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
Subtarget);
}
if (auto SSatVal = detectSSatPattern(In, VT))
return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
Subtarget);
}
return SDValue();
}
/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replaces this operation with the more
/// efficient X86ISD::AVG instruction.
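/// For example, with unsigned i8 elements a = 250 and b = 251,
/// c = (250 + 251 + 1) / 2 = 251, matching the rounding-up semantics of
/// PAVGB/PAVGW while the widened intermediate avoids overflowing i8.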
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
if (!VT.isVector())
return SDValue();
EVT InVT = In.getValueType();
unsigned NumElems = VT.getVectorNumElements();
EVT ScalarVT = VT.getVectorElementType();
if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
isPowerOf2_32(NumElems)))
return SDValue();
// InScalarVT is the intermediate type in the AVG pattern and it should be
// wider than the original input type (i8/i16).
EVT InScalarVT = InVT.getVectorElementType();
if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
return SDValue();
if (!Subtarget.hasSSE2())
return SDValue();
// Detect the following pattern:
//
// %1 = zext <N x i8> %a to <N x i32>
// %2 = zext <N x i8> %b to <N x i32>
// %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
// %4 = add nuw nsw <N x i32> %3, %2
// %5 = lshr <N x i32> %4, <i32 1 x N>
// %6 = trunc <N x i32> %5 to <N x i8>
//
// In AVX512, the last instruction can also be a trunc store.
if (In.getOpcode() != ISD::SRL)
return SDValue();
// A lambda checking whether the given SDValue is a constant vector whose
// elements all lie in the range [Min, Max].
auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV || !BV->isConstant())
return false;
for (SDValue Op : V->ops()) {
ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
if (!C)
return false;
const APInt &Val = C->getAPIntValue();
if (Val.ult(Min) || Val.ugt(Max))
return false;
}
return true;
};
// Check that each element of the vector is logically right-shifted by one.
auto LHS = In.getOperand(0);
auto RHS = In.getOperand(1);
if (!IsConstVectorInRange(RHS, 1, 1))
return SDValue();
if (LHS.getOpcode() != ISD::ADD)
return SDValue();
// Detect a pattern of a + b + 1 where the order doesn't matter.
SDValue Operands[3];
Operands[0] = LHS.getOperand(0);
Operands[1] = LHS.getOperand(1);
auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
};
// Take care of the case when one of the operands is a constant vector whose
// elements are in the range [1, 256] (or [1, 65536] for i16).
if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
Operands[0].getOperand(0).getValueType() == VT) {
// The pattern is detected. Subtract one from the constant vector, then
// demote it and emit X86ISD::AVG instruction.
SDValue VecOnes = DAG.getConstant(1, DL, InVT);
Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
return SplitOpsAndApply(DAG, Subtarget, DL, VT,
{ Operands[0].getOperand(0), Operands[1] },
AVGBuilder);
}
if (Operands[0].getOpcode() == ISD::ADD)
std::swap(Operands[0], Operands[1]);
else if (Operands[1].getOpcode() != ISD::ADD)
return SDValue();
Operands[2] = Operands[1].getOperand(0);
Operands[1] = Operands[1].getOperand(1);
// Now we have three operands of two additions. Check that one of them is a
// constant vector with ones, and the other two are promoted from i8/i16.
for (int i = 0; i < 3; ++i) {
if (!IsConstVectorInRange(Operands[i], 1, 1))
continue;
std::swap(Operands[i], Operands[2]);
// Check if Operands[0] and Operands[1] are results of type promotion.
for (int j = 0; j < 2; ++j)
if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
Operands[j].getOperand(0).getValueType() != VT)
return SDValue();
// The pattern is detected, emit X86ISD::AVG instruction(s).
return SplitOpsAndApply(DAG, Subtarget, DL, VT,
{ Operands[0].getOperand(0),
Operands[1].getOperand(0) }, AVGBuilder);
}
return SDValue();
}
static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
LoadSDNode *Ld = cast<LoadSDNode>(N);
EVT RegVT = Ld->getValueType(0);
EVT MemVT = Ld->getMemoryVT();
SDLoc dl(Ld);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// For chips with slow 32-byte unaligned loads, break the 32-byte operation
// into two 16-byte operations. Also split non-temporal aligned loads on
// pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
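// For example, on such a target a 32-byte v8f32 load becomes two v4f32 loads
// at offsets 0 and 16, recombined with CONCAT_VECTORS and a TokenFactor for
// the two load chains.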
ISD::LoadExtType Ext = Ld->getExtensionType();
bool Fast;
unsigned AddressSpace = Ld->getAddressSpace();
unsigned Alignment = Ld->getAlignment();
if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
Ext == ISD::NON_EXTLOAD &&
((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
(TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
AddressSpace, Alignment, &Fast) && !Fast))) {
unsigned NumElems = RegVT.getVectorNumElements();
if (NumElems < 2)
return SDValue();
SDValue Ptr = Ld->getBasePtr();
EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
NumElems/2);
SDValue Load1 =
DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
Alignment, Ld->getMemOperand()->getFlags());
Ptr = DAG.getMemBasePlusOffset(Ptr, 16, dl);
SDValue Load2 =
DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
Ld->getPointerInfo().getWithOffset(16),
MinAlign(Alignment, 16U), Ld->getMemOperand()->getFlags());
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Load1.getValue(1),
Load2.getValue(1));
SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
return DCI.CombineTo(N, NewVec, TF, true);
}
return SDValue();
}
/// If V is a build vector of boolean constants and exactly one of those
/// constants is true, return the operand index of that true element.
/// Otherwise, return -1.
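/// For example, the mask <i1 0, i1 0, i1 1, i1 0> returns 2, while
/// <i1 1, i1 1, i1 0, i1 0> returns -1 because two elements are true.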
static int getOneTrueElt(SDValue V) {
// This needs to be a build vector of booleans.
// TODO: Checking for the i1 type matches the IR definition for the mask,
// but the mask check could be loosened to i8 or other types. That might
// also require checking more than 'allOnesValue'; e.g., the x86 HW
// instructions only require that the MSB is set for each mask element.
// The ISD::MSTORE comments/definition do not specify how the mask operand
// is formatted.
auto *BV = dyn_cast<BuildVectorSDNode>(V);
if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
return -1;
int TrueIndex = -1;
unsigned NumElts = BV->getValueType(0).getVectorNumElements();
for (unsigned i = 0; i < NumElts; ++i) {
const SDValue &Op = BV->getOperand(i);
if (Op.isUndef())
continue;
auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
if (!ConstNode)
return -1;
if (ConstNode->getAPIntValue().isAllOnesValue()) {
// If we already found a one, this is too many.
if (TrueIndex >= 0)
return -1;
TrueIndex = i;
}
}
return TrueIndex;
}
/// Given a masked memory load/store operation, return true if it has one mask
/// bit set. If it has one mask bit set, then also return the memory address of
/// the scalar element to load/store, the vector index to insert/extract that
/// scalar element, and the alignment for the scalar memory access.
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
SelectionDAG &DAG, SDValue &Addr,
SDValue &Index, unsigned &Alignment) {
int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
if (TrueMaskElt < 0)
return false;
// Get the address of the one scalar element that is specified by the mask
// using the appropriate offset from the base pointer.
EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
Addr = MaskedOp->getBasePtr();
if (TrueMaskElt != 0) {
unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
}
Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
return true;
}
/// If exactly one element of the mask is set for a non-extending masked load,
/// it is a scalar load and vector insert.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
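/// For example, a masked load of v4f32 with mask <0,0,1,0> becomes a scalar
/// load of element 2 at (base + 2 * 4 bytes), inserted into the pass-through
/// vector at index 2.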
static SDValue
reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
// TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
// However, some target hooks may need to be added to know when the transform
// is profitable. Endianness would also have to be considered.
SDValue Addr, VecIndex;
unsigned Alignment;
if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
return SDValue();
// Load the one scalar element that is specified by the mask using the
// appropriate offset from the base pointer.
SDLoc DL(ML);
EVT VT = ML->getValueType(0);
EVT EltVT = VT.getVectorElementType();
SDValue Load =
DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
Alignment, ML->getMemOperand()->getFlags());
// Insert the loaded element into the appropriate place in the vector.
SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, ML->getSrc0(),
Load, VecIndex);
return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
}
static SDValue
combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
return SDValue();
SDLoc DL(ML);
EVT VT = ML->getValueType(0);
// If we are loading the first and last elements of a vector, it is safe and
// always faster to load the whole vector. Replace the masked load with a
// vector load and select.
unsigned NumElts = VT.getVectorNumElements();
BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
if (LoadFirstElt && LoadLastElt) {
SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
ML->getMemOperand());
SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd, ML->getSrc0());
return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
}
// Convert a masked load with a constant mask into a masked load and a select.
// This allows the select operation to use a faster kind of select instruction
// (for example, vblendvps -> vblendps).
// Don't try this if the pass-through operand is already undefined. That would
// cause an infinite loop because that's what we're about to create.
if (ML->getSrc0().isUndef())
return SDValue();
// The new masked load has an undef pass-through operand. The select uses the
// original pass-through operand.
SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
ML->getMask(), DAG.getUNDEF(VT),
ML->getMemoryVT(), ML->getMemOperand(),
ML->getExtensionType());
SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML, ML->getSrc0());
return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
}
static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
// TODO: An expanding load with a constant mask may be optimized as well.
if (Mld->isExpandingLoad())
return SDValue();
if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
return ScalarLoad;
// TODO: Do some AVX512 subsets benefit from this transform?
if (!Subtarget.hasAVX512())
if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
return Blend;
}
if (Mld->getExtensionType() != ISD::SEXTLOAD)
return SDValue();
// Resolve extending loads.
EVT VT = Mld->getValueType(0);
unsigned NumElems = VT.getVectorNumElements();
EVT LdVT = Mld->getMemoryVT();
SDLoc dl(Mld);
assert(LdVT != VT && "Cannot extend to the same type");
unsigned ToSz = VT.getScalarSizeInBits();
unsigned FromSz = LdVT.getScalarSizeInBits();
// From/To sizes and ElemCount must be powers of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for extending masked load");
unsigned SizeRatio = ToSz / FromSz;
assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
// Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
LdVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
// Convert Src0 value.
SDValue WideSrc0 = DAG.getBitcast(WideVecVT, Mld->getSrc0());
if (!Mld->getSrc0().isUndef()) {
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
// Can't shuffle using an illegal type.
assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
"WideVecVT should be legal");
WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
DAG.getUNDEF(WideVecVT), ShuffleVec);
}
// Prepare the new mask.
SDValue NewMask;
SDValue Mask = Mld->getMask();
if (Mask.getValueType() == VT) {
// Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
ShuffleVec[i] = NumElems * SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
ShuffleVec);
} else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
WidenNumElts);
unsigned NumConcat = WidenNumElts / MaskNumElts;
SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
Ops[0] = Mask;
NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
}
SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
Mld->getBasePtr(), NewMask, WideSrc0,
Mld->getMemoryVT(), Mld->getMemOperand(),
ISD::NON_EXTLOAD);
SDValue NewVec = getExtendInVec(X86ISD::VSEXT, dl, VT, WideLd, DAG);
return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}
/// If exactly one element of the mask is set for a non-truncating masked store,
/// it is a vector extract and scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
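/// For example, a masked store of v4i32 with mask <0,1,0,0> becomes an
/// extract of element 1 followed by a scalar i32 store at (base + 4 bytes).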
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
SelectionDAG &DAG) {
// TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
// However, some target hooks may need to be added to know when the transform
// is profitable. Endianness would also have to be considered.
SDValue Addr, VecIndex;
unsigned Alignment;
if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
return SDValue();
// Extract the one scalar element that is actually being stored.
SDLoc DL(MS);
EVT VT = MS->getValue().getValueType();
EVT EltVT = VT.getVectorElementType();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
MS->getValue(), VecIndex);
// Store that element at the appropriate offset from the base pointer.
return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
Alignment, MS->getMemOperand()->getFlags());
}
static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
if (Mst->isCompressingStore())
return SDValue();
if (!Mst->isTruncatingStore()) {
if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
return ScalarStore;
// If the mask is checking (0 > X), we're creating a vector with all-zeros
// or all-ones elements based on the sign bits of X. AVX1 masked store only
// cares about the sign bit of each mask element, so eliminate the compare:
// mstore val, ptr, (pcmpgt 0, X) --> mstore val, ptr, X
// Note that by waiting to match an x86-specific PCMPGT node, we're
// eliminating potentially more complex matching of a setcc node which has
// a full range of predicates.
SDValue Mask = Mst->getMask();
if (Mask.getOpcode() == X86ISD::PCMPGT &&
ISD::isBuildVectorAllZeros(Mask.getOperand(0).getNode())) {
assert(Mask.getValueType() == Mask.getOperand(1).getValueType() &&
"Unexpected type for PCMPGT");
return DAG.getMaskedStore(
Mst->getChain(), SDLoc(N), Mst->getValue(), Mst->getBasePtr(),
Mask.getOperand(1), Mst->getMemoryVT(), Mst->getMemOperand());
}
// TODO: AVX512 targets should also be able to simplify something like the
// pattern above, but that pattern will be different. It will either need to
// match setcc more generally or match PCMPGTM later (in tablegen?).
return SDValue();
}
// Resolve truncating stores.
EVT VT = Mst->getValue().getValueType();
unsigned NumElems = VT.getVectorNumElements();
EVT StVT = Mst->getMemoryVT();
SDLoc dl(Mst);
assert(StVT != VT && "Cannot truncate to the same type");
unsigned FromSz = VT.getScalarSizeInBits();
unsigned ToSz = StVT.getScalarSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// The truncating store is legal in some cases. For example
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
// are designated for truncating stores.
// In those cases we don't need any further transformations.
if (TLI.isTruncStoreLegal(VT, StVT))
return SDValue();
// From/To sizes and ElemCount must be powers of two.
assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
"Unexpected size for truncating masked store");
// We are going to use the original vector elt for storing.
// Accumulated smaller vector elements must be a multiple of the store size.
assert (((NumElems * FromSz) % ToSz) == 0 &&
"Unexpected ratio for truncating masked store");
unsigned SizeRatio = FromSz / ToSz;
assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
// Create a type on which we perform the shuffle.
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
StVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
// Can't shuffle using an illegal type.
assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
"WideVecVT should be legal");
SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
ShuffleVec);
SDValue NewMask;
SDValue Mask = Mst->getMask();
if (Mask.getValueType() == VT) {
// Mask and original value have the same type.
NewMask = DAG.getBitcast(WideVecVT, Mask);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
ShuffleVec[i] = NumElems*SizeRatio;
NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
DAG.getConstant(0, dl, WideVecVT),
ShuffleVec);
} else {
assert(Mask.getValueType().getVectorElementType() == MVT::i1);
unsigned WidenNumElts = NumElems*SizeRatio;
unsigned MaskNumElts = VT.getVectorNumElements();
EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
WidenNumElts);
unsigned NumConcat = WidenNumElts / MaskNumElts;
SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
Ops[0] = Mask;
NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
}
return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
Mst->getBasePtr(), NewMask, StVT,
Mst->getMemOperand(), false);
}
static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
StoreSDNode *St = cast<StoreSDNode>(N);
EVT VT = St->getValue().getValueType();
EVT StVT = St->getMemoryVT();
SDLoc dl(St);
SDValue StoredVal = St->getOperand(1);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
// This will avoid a copy to k-register.
if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
StoredVal.getOperand(0).getValueType() == MVT::i8) {
return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
St->getBasePtr(), St->getPointerInfo(),
St->getAlignment(), St->getMemOperand()->getFlags());
}
// Widen v2i1/v4i1 stores to v8i1.
if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
Subtarget.hasAVX512()) {
unsigned NumConcats = 8 / VT.getVectorNumElements();
SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
Ops[0] = StoredVal;
StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
// Turn vXi1 stores of constants into a scalar store.
if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
// If it's a v64i1 store without 64-bit support, we need two stores.
if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
StoredVal->ops().slice(0, 32));
Lo = combinevXi1ConstantToInteger(Lo, DAG);
SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
StoredVal->ops().slice(32, 32));
Hi = combinevXi1ConstantToInteger(Hi, DAG);
unsigned Alignment = St->getAlignment();
SDValue Ptr0 = St->getBasePtr();
SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
SDValue Ch0 =
DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
Alignment, St->getMemOperand()->getFlags());
SDValue Ch1 =
DAG.getStore(St->getChain(), dl, Hi, Ptr1,
St->getPointerInfo().getWithOffset(4),
MinAlign(Alignment, 4U),
St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
}
StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
// If we are saving a concatenation of two XMM registers and 32-byte stores
// are slow, such as on Sandy Bridge, perform two 16-byte stores.
bool Fast;
unsigned AddressSpace = St->getAddressSpace();
unsigned Alignment = St->getAlignment();
if (VT.is256BitVector() && StVT == VT &&
TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
AddressSpace, Alignment, &Fast) &&
!Fast) {
unsigned NumElems = VT.getVectorNumElements();
if (NumElems < 2)
return SDValue();
SDValue Value0 = extract128BitVector(StoredVal, 0, DAG, dl);
SDValue Value1 = extract128BitVector(StoredVal, NumElems / 2, DAG, dl);
SDValue Ptr0 = St->getBasePtr();
SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 16, dl);
SDValue Ch0 =
DAG.getStore(St->getChain(), dl, Value0, Ptr0, St->getPointerInfo(),
Alignment, St->getMemOperand()->getFlags());
SDValue Ch1 =
DAG.getStore(St->getChain(), dl, Value1, Ptr1,
St->getPointerInfo().getWithOffset(16),
MinAlign(Alignment, 16U), St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
}
// Optimize trunc store (of multiple scalars) to shuffle and store.
// First, pack all of the elements in one place. Next, store to memory
// in fewer chunks.
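// For example, truncating a v8i32 store to v8i8 shuffles the eight result
// bytes to the bottom of the wide register and stores them as one 64-bit
// chunk (or as an f64 chunk when i64 is not legal but f64 is).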
if (St->isTruncatingStore() && VT.isVector()) {
// Check if we can detect an AVG pattern from the truncation. If yes,
// replace the trunc store by a normal store with the result of X86ISD::AVG
// instruction.
if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
Subtarget, dl))
return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
if (SDValue Val =
detectAVX512SSatPattern(St->getValue(), St->getMemoryVT(), Subtarget,
TLI))
return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
dl, Val, St->getBasePtr(),
St->getMemoryVT(), St->getMemOperand(), DAG);
if (SDValue Val = detectAVX512USatPattern(St->getValue(), St->getMemoryVT(),
DAG, dl, Subtarget, TLI))
return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
dl, Val, St->getBasePtr(),
St->getMemoryVT(), St->getMemOperand(), DAG);
unsigned NumElems = VT.getVectorNumElements();
assert(StVT != VT && "Cannot truncate to the same type");
unsigned FromSz = VT.getScalarSizeInBits();
unsigned ToSz = StVT.getScalarSizeInBits();
// The truncating store is legal in some cases. For example
// vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
// are designated for truncating stores.
// In those cases we don't need any further transformations.
if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
return SDValue();
// From/To sizes and ElemCount must be powers of two.
if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
// We are going to use the original vector elt for storing.
// Accumulated smaller vector elements must be a multiple of the store size.
if (0 != (NumElems * FromSz) % ToSz) return SDValue();
unsigned SizeRatio = FromSz / ToSz;
assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
// Create a type on which we perform the shuffle
EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
StVT.getScalarType(), NumElems*SizeRatio);
assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
for (unsigned i = 0; i != NumElems; ++i)
ShuffleVec[i] = i * SizeRatio;
// Can't shuffle using an illegal type.
if (!TLI.isTypeLegal(WideVecVT))
return SDValue();
SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
DAG.getUNDEF(WideVecVT),
ShuffleVec);
// At this point all of the data is stored at the bottom of the
// register. We now need to store it to memory.
// Find the largest legal store unit.
MVT StoreType = MVT::i8;
for (MVT Tp : MVT::integer_valuetypes()) {
if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
StoreType = Tp;
}
// On 32-bit systems, we can't store 64-bit integers. Try bitcasting to f64.
if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
(64 <= NumElems * ToSz))
StoreType = MVT::f64;
// Bitcast the original vector into a vector of store-size units
EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
SmallVector<SDValue, 8> Chains;
SDValue Ptr = St->getBasePtr();
// Perform one or more big stores into memory.
for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
StoreType, ShuffWide,
DAG.getIntPtrConstant(i, dl));
SDValue Ch =
DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
St->getAlignment(), St->getMemOperand()->getFlags());
Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
Chains.push_back(Ch);
}
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}
// Turn load->store of MMX types into GPR load/stores. This avoids clobbering
// the FP state in cases where an emms may be missing.
// A preferable solution to the general problem is to figure out the right
// places to insert EMMS. This qualifies as a quick hack.
// Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
if (VT.getSizeInBits() != 64)
return SDValue();
const Function &F = DAG.getMachineFunction().getFunction();
bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
bool F64IsLegal =
!Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
if ((VT.isVector() ||
(VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
isa<LoadSDNode>(St->getValue()) &&
!cast<LoadSDNode>(St->getValue())->isVolatile() &&
St->getChain().hasOneUse() && !St->isVolatile()) {
LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
SmallVector<SDValue, 8> Ops;
if (!ISD::isNormalLoad(Ld))
return SDValue();
// If this is not the MMX case, i.e. we are just turning i64 load/store
// into f64 load/store, avoid the transformation if there are multiple
// uses of the loaded value.
if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
return SDValue();
SDLoc LdDL(Ld);
SDLoc StDL(N);
// If we are a 64-bit capable x86, lower to a single movq load/store pair.
// Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
// pair instead.
if (Subtarget.is64Bit() || F64IsLegal) {
MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
Ld->getMemOperand());
// Make sure new load is placed in same chain order.
DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
St->getMemOperand());
}
// Otherwise, lower to two pairs of 32-bit loads / stores.
SDValue LoAddr = Ld->getBasePtr();
SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
Ld->getPointerInfo(), Ld->getAlignment(),
Ld->getMemOperand()->getFlags());
SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
Ld->getPointerInfo().getWithOffset(4),
MinAlign(Ld->getAlignment(), 4),
Ld->getMemOperand()->getFlags());
// Make sure new loads are placed in same chain order.
DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
DAG.makeEquivalentMemoryOrdering(Ld, HiLd);
LoAddr = St->getBasePtr();
HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
SDValue LoSt =
DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
St->getAlignment(), St->getMemOperand()->getFlags());
SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
St->getPointerInfo().getWithOffset(4),
MinAlign(St->getAlignment(), 4),
St->getMemOperand()->getFlags());
return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
}
// This is similar to the above case, but here we handle a scalar 64-bit
// integer store that is extracted from a vector on a 32-bit target.
// If we have SSE2, then we can treat it like a floating-point double
// to get past legalization. The execution dependencies fixup pass will
// choose the optimal machine instruction for the store if this really is
// an integer or v2f32 rather than an f64.
if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
SDValue OldExtract = St->getOperand(1);
SDValue ExtOp0 = OldExtract.getOperand(0);
unsigned VecSize = ExtOp0.getValueSizeInBits();
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
BitCast, OldExtract.getOperand(1));
return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
St->getPointerInfo(), St->getAlignment(),
St->getMemOperand()->getFlags());
}
return SDValue();
}
/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector. For example, if
/// A = < float a0, float a1, float a2, float a3 >
/// and
/// B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
/// Note that the binary operation should have the property that if one of the
/// operands is UNDEF then the result is UNDEF.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
// Look for the following pattern: if
// A = < float a0, float a1, float a2, float a3 >
// B = < float b0, float b1, float b2, float b3 >
// and
// LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
// RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
// then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
// which is A horizontal-op B.
// At least one of the operands should be a vector shuffle.
if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
return false;
MVT VT = LHS.getSimpleValueType();
assert((VT.is128BitVector() || VT.is256BitVector()) &&
"Unsupported vector type for horizontal add/sub");
// Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
// operate independently on 128-bit lanes.
unsigned NumElts = VT.getVectorNumElements();
unsigned NumLanes = VT.getSizeInBits()/128;
unsigned NumLaneElts = NumElts / NumLanes;
assert((NumLaneElts % 2 == 0) &&
"Vector type should have an even number of elements in each lane");
unsigned HalfLaneElts = NumLaneElts/2;
// View LHS in the form
// LHS = VECTOR_SHUFFLE A, B, LMask
// If LHS is not a shuffle then pretend it is the shuffle
// LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
// NOTE: in what follows a default initialized SDValue represents an UNDEF of
// type VT.
SDValue A, B;
SmallVector<int, 16> LMask(NumElts);
if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
if (!LHS.getOperand(0).isUndef())
A = LHS.getOperand(0);
if (!LHS.getOperand(1).isUndef())
B = LHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
std::copy(Mask.begin(), Mask.end(), LMask.begin());
} else {
if (!LHS.isUndef())
A = LHS;
for (unsigned i = 0; i != NumElts; ++i)
LMask[i] = i;
}
// Likewise, view RHS in the form
// RHS = VECTOR_SHUFFLE C, D, RMask
SDValue C, D;
SmallVector<int, 16> RMask(NumElts);
if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
if (!RHS.getOperand(0).isUndef())
C = RHS.getOperand(0);
if (!RHS.getOperand(1).isUndef())
D = RHS.getOperand(1);
ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
std::copy(Mask.begin(), Mask.end(), RMask.begin());
} else {
if (!RHS.isUndef())
C = RHS;
for (unsigned i = 0; i != NumElts; ++i)
RMask[i] = i;
}
// Check that the shuffles are both shuffling the same vectors.
if (!(A == C && B == D) && !(A == D && B == C))
return false;
// If everything is UNDEF then bail out: it would be better to fold to UNDEF.
if (!A.getNode() && !B.getNode())
return false;
// If A and B occur in reverse order in RHS, then "swap" them (which means
// rewriting the mask).
if (A != C)
ShuffleVectorSDNode::commuteMask(RMask);
// At this point LHS and RHS are equivalent to
// LHS = VECTOR_SHUFFLE A, B, LMask
// RHS = VECTOR_SHUFFLE A, B, RMask
// Check that the masks correspond to performing a horizontal operation.
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
int LIdx = LMask[i+l], RIdx = RMask[i+l];
// Ignore any UNDEF components.
if (LIdx < 0 || RIdx < 0 ||
(!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
(!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
continue;
// Check that successive elements are being operated on. If not, this is
// not a horizontal operation.
unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
if (!(LIdx == Index && RIdx == Index + 1) &&
!(IsCommutative && LIdx == Index + 1 && RIdx == Index))
return false;
}
}
LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
return true;
}
/// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
bool IsFadd = N->getOpcode() == ISD::FADD;
assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
// Try to synthesize horizontal add/sub from adds/subs of shuffles.
if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
(Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
isHorizontalBinOp(LHS, RHS, IsFadd)) {
auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
}
return SDValue();
}
/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the codegen.
/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
SDValue Src = N->getOperand(0);
unsigned Opcode = Src.getOpcode();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = N->getValueType(0);
EVT SrcVT = Src.getValueType();
auto IsRepeatedOpOrFreeTruncation = [VT](SDValue Op0, SDValue Op1) {
unsigned TruncSizeInBits = VT.getScalarSizeInBits();
// Repeated operand, so we are only trading one output truncation for
// one input truncation.
if (Op0 == Op1)
return true;
// See if either operand has been extended from a smaller/equal size to
// the truncation size, allowing a truncation to combine with the extend.
unsigned Opcode0 = Op0.getOpcode();
if ((Opcode0 == ISD::ANY_EXTEND || Opcode0 == ISD::SIGN_EXTEND ||
Opcode0 == ISD::ZERO_EXTEND) &&
Op0.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
return true;
unsigned Opcode1 = Op1.getOpcode();
if ((Opcode1 == ISD::ANY_EXTEND || Opcode1 == ISD::SIGN_EXTEND ||
Opcode1 == ISD::ZERO_EXTEND) &&
Op1.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
return true;
// See if either operand is a single use constant which can be constant
// folded.
SDValue BC0 = peekThroughOneUseBitcasts(Op0);
SDValue BC1 = peekThroughOneUseBitcasts(Op1);
return ISD::isBuildVectorOfConstantSDNodes(BC0.getNode()) ||
ISD::isBuildVectorOfConstantSDNodes(BC1.getNode());
};
auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
return DAG.getNode(Opcode, DL, VT, Trunc0, Trunc1);
};
// Don't combine if the operation has other uses.
if (!N->isOnlyUserOf(Src.getNode()))
return SDValue();
// Only support vector truncation for now.
// TODO: i64 scalar math would benefit as well.
if (!VT.isVector())
return SDValue();
// In most cases it's only worth pre-truncating if we're only facing the cost
// of one truncation, i.e. if one of the inputs will constant fold or the
// input is repeated.
switch (Opcode) {
case ISD::AND:
case ISD::XOR:
case ISD::OR: {
SDValue Op0 = Src.getOperand(0);
SDValue Op1 = Src.getOperand(1);
if (TLI.isOperationLegalOrPromote(Opcode, VT) &&
IsRepeatedOpOrFreeTruncation(Op0, Op1))
return TruncateArithmetic(Op0, Op1);
break;
}
case ISD::MUL:
// X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
// better to truncate if we have the chance.
if (SrcVT.getScalarType() == MVT::i64 && TLI.isOperationLegal(Opcode, VT) &&
!TLI.isOperationLegal(Opcode, SrcVT))
return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
LLVM_FALLTHROUGH;
case ISD::ADD: {
// TODO: ISD::SUB should be here but interferes with combineSubToSubus.
SDValue Op0 = Src.getOperand(0);
SDValue Op1 = Src.getOperand(1);
if (TLI.isOperationLegal(Opcode, VT) &&
IsRepeatedOpOrFreeTruncation(Op0, Op1))
return TruncateArithmetic(Op0, Op1);
break;
}
}
return SDValue();
}
/// Truncate using ISD::AND mask and X86ISD::PACKUS.
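/// For example, for v16i16 -> v16i8 each i16 element is ANDed with 0xFF so
/// the unsigned-saturating pack cannot clamp anything, and PACKUS then keeps
/// exactly the low byte of each element.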
static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = N->getOperand(0);
EVT InVT = In.getValueType();
EVT InSVT = InVT.getVectorElementType();
EVT OutVT = N->getValueType(0);
EVT OutSVT = OutVT.getVectorElementType();
// Split a long vector into vectors of legal type and mask to unset all bits
// that won't appear in the result to prevent saturation.
// TODO - we should be doing this at the maximum legal size but this is
// causing regressions where we're concatenating back to max width just to
// perform the AND and then extracting back again...
unsigned NumSubRegs = InVT.getSizeInBits() / 128;
unsigned NumSubRegElts = 128 / InSVT.getSizeInBits();
EVT SubRegVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubRegElts);
SmallVector<SDValue, 8> SubVecs(NumSubRegs);
APInt Mask =
APInt::getLowBitsSet(InSVT.getSizeInBits(), OutSVT.getSizeInBits());
SDValue MaskVal = DAG.getConstant(Mask, DL, SubRegVT);
for (unsigned i = 0; i < NumSubRegs; i++) {
SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubRegVT, In,
DAG.getIntPtrConstant(i * NumSubRegElts, DL));
SubVecs[i] = DAG.getNode(ISD::AND, DL, SubRegVT, Sub, MaskVal);
}
In = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, SubVecs);
return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
}
/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
const X86Subtarget &Subtarget,
SelectionDAG &DAG) {
SDValue In = N->getOperand(0);
EVT InVT = In.getValueType();
EVT OutVT = N->getValueType(0);
In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
DAG.getValueType(OutVT));
return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
}
/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR whose
/// elements are each extracted from a vector and then truncated, and it is
/// difficult to perform this optimization on that form.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT OutVT = N->getValueType(0);
if (!OutVT.isVector())
return SDValue();
SDValue In = N->getOperand(0);
if (!In.getValueType().isSimple())
return SDValue();
EVT InVT = In.getValueType();
unsigned NumElems = OutVT.getVectorNumElements();
// TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
// SSE2, and we need to take care of it specially.
// AVX512 provides vpmovdb.
if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
return SDValue();
EVT OutSVT = OutVT.getVectorElementType();
EVT InSVT = InVT.getVectorElementType();
if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
(OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
NumElems >= 8))
return SDValue();
// SSSE3's pshufb results in fewer instructions in the cases below.
if (Subtarget.hasSSSE3() && NumElems == 8 &&
((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
(InSVT == MVT::i32 && OutSVT == MVT::i16)))
return SDValue();
SDLoc DL(N);
// SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
// for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
// truncate 2 x v4i32 to v8i16.
if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
if (InSVT == MVT::i32)
return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
return SDValue();
}
/// This function transforms vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values, from vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32,
/// into X86ISD::PACKSS/PACKUS operations.
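/// For example, a v8i32 comparison result consists entirely of sign bits
/// (0 or -1 per element), so PACKSS can narrow it to v8i16 without losing
/// any information.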
static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// Requires SSE2. Bail on AVX512, which already has fast truncate instructions.
if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
return SDValue();
if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
return SDValue();
SDValue In = N->getOperand(0);
if (!In.getValueType().isSimple())
return SDValue();
MVT VT = N->getValueType(0).getSimpleVT();
MVT SVT = VT.getScalarType();
MVT InVT = In.getValueType().getSimpleVT();
MVT InSVT = InVT.getScalarType();
// Check we have a truncation suited for PACKSS/PACKUS.
if (!VT.is128BitVector() && !VT.is256BitVector())
return SDValue();
if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
return SDValue();
if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
return SDValue();
unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
// Use PACKUS if the input has zero-bits that extend all the way to the
// packed/truncated value. e.g. masks, zext_in_reg, etc.
KnownBits Known;
DAG.computeKnownBits(In, Known);
unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
// Use PACKSS if the input has sign-bits that extend all the way to the
// packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
unsigned NumSignBits = DAG.ComputeNumSignBits(In);
if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
return SDValue();
}
// Try to form a MULHU or MULHS node by looking for
// (trunc (srl (mul ext, ext), 16))
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
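// For example, for v8i16 a and b,
// (trunc (srl (mul (zext a), (zext b)), 16)) over v8i32 keeps the high
// 16 bits of each 32-bit product, i.e. (mulhu a, b), which lowers to PMULHUW.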
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
SelectionDAG &DAG, const X86Subtarget &Subtarget) {
// First instruction should be a right shift of a multiply.
if (Src.getOpcode() != ISD::SRL ||
Src.getOperand(0).getOpcode() != ISD::MUL)
return SDValue();
if (!Subtarget.hasSSE2())
return SDValue();
// Only handle vXi16 types that are at least 128-bits.
if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
VT.getVectorNumElements() < 8)
return SDValue();
// Input type should be vXi32.
EVT InVT = Src.getValueType();
if (InVT.getVectorElementType() != MVT::i32)
return SDValue();
// Need a shift by 16.
APInt ShiftAmt;
if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
ShiftAmt != 16)
return SDValue();
SDValue LHS = Src.getOperand(0).getOperand(0);
SDValue RHS = Src.getOperand(0).getOperand(1);
unsigned ExtOpc = LHS.getOpcode();
if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
RHS.getOpcode() != ExtOpc)
return SDValue();
// Peek through the extends.
LHS = LHS.getOperand(0);
RHS = RHS.getOperand(0);
// Ensure the input types match.
if (LHS.getValueType() != VT || RHS.getValueType() != VT)
return SDValue();
unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
return DAG.getNode(Opc, DL, VT, LHS, RHS);
}
// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
// from one vector with signed bytes from another vector, adds together
// adjacent pairs of 16-bit products, and saturates the result before
// truncating to 16-bits.
//
// Which looks something like this:
// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
//      (mul (zext (odd elts (i8 A))), (sext (odd elts (i8 B)))))))
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
if (!VT.isVector() || !Subtarget.hasSSSE3())
return SDValue();
unsigned NumElems = VT.getVectorNumElements();
EVT ScalarVT = VT.getVectorElementType();
if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
return SDValue();
SDValue SSatVal = detectSSatPattern(In, VT);
if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
return SDValue();
// OK, this is a signed saturation of an ADD. See if this ADD is adding pairs
// of multiplies from even/odd elements.
SDValue N0 = SSatVal.getOperand(0);
SDValue N1 = SSatVal.getOperand(1);
if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
// TODO: Handle constant vectors and use knownbits/computenumsignbits?
// Canonicalize zero_extend to LHS.
if (N01.getOpcode() == ISD::ZERO_EXTEND)
std::swap(N00, N01);
if (N11.getOpcode() == ISD::ZERO_EXTEND)
std::swap(N10, N11);
// Ensure we have a zero_extend and a sign_extend.
if (N00.getOpcode() != ISD::ZERO_EXTEND ||
N01.getOpcode() != ISD::SIGN_EXTEND ||
N10.getOpcode() != ISD::ZERO_EXTEND ||
N11.getOpcode() != ISD::SIGN_EXTEND)
return SDValue();
// Peek through the extends.
N00 = N00.getOperand(0);
N01 = N01.getOperand(0);
N10 = N10.getOperand(0);
N11 = N11.getOperand(0);
// Ensure the extend is from vXi8.
if (N00.getValueType().getVectorElementType() != MVT::i8 ||
N01.getValueType().getVectorElementType() != MVT::i8 ||
N10.getValueType().getVectorElementType() != MVT::i8 ||
N11.getValueType().getVectorElementType() != MVT::i8)
return SDValue();
// All inputs should be build_vectors.
if (N00.getOpcode() != ISD::BUILD_VECTOR ||
N01.getOpcode() != ISD::BUILD_VECTOR ||
N10.getOpcode() != ISD::BUILD_VECTOR ||
N11.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
// N00/N10 are zero extended. N01/N11 are sign extended.
// For each element, we need the even element from one vector multiplied by
// the even element of the other vector, added to the odd element from the
// first vector multiplied by the odd element of the other vector. That is,
// for each element i the following operation must be performed:
// A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
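// For example, at i = 0 this accepts, in either order (the add is
// commutative and the indices are reordered below):
//   zext(A[0]) * sext(B[0]) + zext(A[1]) * sext(B[1])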
SDValue ZExtIn, SExtIn;
for (unsigned i = 0; i != NumElems; ++i) {
SDValue N00Elt = N00.getOperand(i);
SDValue N01Elt = N01.getOperand(i);
SDValue N10Elt = N10.getOperand(i);
SDValue N11Elt = N11.getOperand(i);
// TODO: Be more tolerant to undefs.
if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
return SDValue();
unsigned IdxN00 = ConstN00Elt->getZExtValue();
unsigned IdxN01 = ConstN01Elt->getZExtValue();
unsigned IdxN10 = ConstN10Elt->getZExtValue();
unsigned IdxN11 = ConstN11Elt->getZExtValue();
// Add is commutative so indices can be reordered.
if (IdxN00 > IdxN10) {
std::swap(IdxN00, IdxN10);
std::swap(IdxN01, IdxN11);
}
// N0 indices must be the even element. N1 indices must be the next odd element.
if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
return SDValue();
SDValue N00In = N00Elt.getOperand(0);
SDValue N01In = N01Elt.getOperand(0);
SDValue N10In = N10Elt.getOperand(0);
SDValue N11In = N11Elt.getOperand(0);
// The first time we find an input, capture it.
if (!ZExtIn) {
ZExtIn = N00In;
SExtIn = N01In;
}
if (ZExtIn != N00In || SExtIn != N01In ||
ZExtIn != N10In || SExtIn != N11In)
return SDValue();
}
auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Ops holds the two vXi8 inputs (possibly split); build the PMADDUBSW node
// directly, producing half as many i16 elements.
EVT InVT = Ops[0].getValueType();
assert(InVT.getScalarType() == MVT::i8 &&
"Unexpected scalar element type");
assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
InVT.getVectorNumElements() / 2);
return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
};
return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
PMADDBuilder);
}
static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT VT = N->getValueType(0);
SDValue Src = N->getOperand(0);
SDLoc DL(N);
// Attempt to pre-truncate inputs to arithmetic ops instead.
if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
return V;
// Try to detect AVG pattern first.
if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
return Avg;
// Try to detect PMADD
if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
return PMAdd;
// Try to combine truncation with signed/unsigned saturation.
if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
return Val;
// Try to combine PMULHUW/PMULHW for vXi16.
if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
return V;
// Detect (i32 truncate (bitcast x86mmx)): the bitcast source is a direct
// MMX result, which we can move with MMX_MOVD2W.
if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
SDValue BCSrc = Src.getOperand(0);
if (BCSrc.getValueType() == MVT::x86mmx)
return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
}
// Try to truncate extended sign/zero bits with PACKSS/PACKUS.
if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
return V;
return combineVectorTruncation(N, DAG, Subtarget);
}
/// Returns the negated value if the node \p N flips the sign of an FP value.
///
/// An FP-negation node may have different forms: FNEG(x) or
/// FXOR(x, 0x80000000). AVX512F does not have FXOR, so FNEG is lowered as
/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
/// In this case we go through all bitcasts.
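/// For example, on AVX512F targets
///   (v4f32 bitcast (v4i32 xor (v4i32 bitcast x), (splat 0x80000000)))
/// is recognized as FNEG(x), and x is returned.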
static SDValue isFNEG(SDNode *N) {
if (N->getOpcode() == ISD::FNEG)
return N->getOperand(0);
SDValue Op = peekThroughBitcasts(SDValue(N, 0));
if (Op.getOpcode() != X86ISD::FXOR && Op.getOpcode() != ISD::XOR)
return SDValue();
SDValue Op1 = peekThroughBitcasts(Op.getOperand(1));
if (!Op1.getValueType().isFloatingPoint())
return SDValue();
// Extract constant bits and see if they are all sign bit masks.
APInt UndefElts;
SmallVector<APInt, 16> EltBits;
if (getTargetConstantBitsFromNode(Op1, Op1.getScalarValueSizeInBits(),
UndefElts, EltBits, false, false))
if (llvm::all_of(EltBits, [](APInt &I) { return I.isSignMask(); }))
return peekThroughBitcasts(Op.getOperand(0));
return SDValue();
}
/// Do target-specific dag combines on floating point negations.
static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
EVT OrigVT = N->getValueType(0);
SDValue Arg = isFNEG(N);
assert(Arg.getNode() && "N is expected to be an FNEG node");
EVT VT = Arg.getValueType();
EVT SVT = VT.getScalarType();
SDLoc DL(N);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
// If we're negating an FMUL node on a target with FMA, then we can avoid the
// use of a sign-mask constant by performing (-0 - A*B) instead.
// FIXME: Check rounding control flags as well once they become available.
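// For example (assuming nsz and FMA are available):
//   (fneg (fmul a, b)) --> (X86ISD::FNMSUB a, b, 0.0), i.e. -(a*b) - 0.0,
// which needs no sign-mask constant.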
if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
Arg.getOperand(1), Zero);
return DAG.getBitcast(OrigVT, NewNode);
}
// If we're negating an FMA node, then we can adjust the
// instruction to include the extra negation.
unsigned NewOpcode = 0;
if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
switch (Arg.getOpcode()) {
case ISD::FMA: NewOpcode = X86ISD::FNMSUB; break;
case X86ISD::FMSUB: NewOpcode = X86ISD::FNMADD; break;
case X86ISD::FNMADD: NewOpcode = X86ISD::FMSUB; break;
case X86ISD::FNMSUB: NewOpcode = ISD::FMA; break;
case X86ISD::FMADD_RND: NewOpcode = X86ISD::FNMSUB_RND; break;
case X86ISD::FMSUB_RND: NewOpcode = X86ISD::FNMADD_RND; break;
case X86ISD::FNMADD_RND: NewOpcode = X86ISD::FMSUB_RND; break;
case X86ISD::FNMSUB_RND: NewOpcode = X86ISD::FMADD_RND; break;
// We can't handle scalar intrinsic nodes here because they would only
// invert one element and not the whole vector. But we could try to handle
// a negation of the lower element only.
}
}
if (NewOpcode)
return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT,
Arg.getNode()->ops()));
return SDValue();
}
static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = N->getSimpleValueType(0);
// If we have integer vector types available, use the integer opcodes.
if (VT.isVector() && Subtarget.hasSSE2()) {
SDLoc dl(N);
MVT IntVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
unsigned IntOpcode;
switch (N->getOpcode()) {
default: llvm_unreachable("Unexpected FP logic op");
case X86ISD::FOR: IntOpcode = ISD::OR; break;
case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
case X86ISD::FAND: IntOpcode = ISD::AND; break;
case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
}
SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
return DAG.getBitcast(VT, IntOp);
}
return SDValue();
}
/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
if (N->getOpcode() != ISD::XOR)
return SDValue();
SDValue LHS = N->getOperand(0);
auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
return SDValue();
X86::CondCode NewCC = X86::GetOppositeBranchCondition(
X86::CondCode(LHS->getConstantOperandVal(0)));
SDLoc DL(N);
return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
}
static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// If this is SSE1 only convert to FXOR to avoid scalarization.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
N->getValueType(0) == MVT::v4i32) {
return DAG.getBitcast(
MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
}
if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
return Cmp;
if (DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue SetCC = foldXor1SetCC(N, DAG))
return SetCC;
if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
return RV;
if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
return FPLogic;
if (isFNEG(N))
return combineFneg(N, DAG, Subtarget);
return SDValue();
}
static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
EVT VT = N->getValueType(0);
unsigned NumBits = VT.getSizeInBits();
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
// TODO - Constant Folding.
if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
// Reduce Cst1 to its bottom 16 bits.
// NOTE: SimplifyDemandedBits won't do this for constants.
const APInt &Val1 = Cst1->getAPIntValue();
APInt MaskedVal1 = Val1 & 0xFFFF;
if (MaskedVal1 != Val1)
return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
DAG.getConstant(MaskedVal1, SDLoc(N), VT));
}
// Only the bottom 16 bits of the control are required.
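// (BEXTR's control word encodes the start bit in bits [7:0] and the length
// in bits [15:8]; the instruction ignores everything above bit 15.)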
KnownBits Known;
APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
if (TLI.SimplifyDemandedBits(Op1, DemandedMask, Known, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
return SDValue(N, 0);
}
return SDValue();
}
static bool isNullFPScalarOrVectorConst(SDValue V) {
return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
}
/// If a value is a scalar FP zero or a vector FP zero (potentially including
/// undefined elements), return a zero constant that may be used to fold away
/// that value. In the case of a vector, the returned constant will not contain
/// undefined elements even if the input parameter does. This makes it suitable
/// to be used as a replacement operand with operations (e.g., bitwise-and) where
/// an undef should not propagate.
static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!isNullFPScalarOrVectorConst(V))
return SDValue();
if (V.getValueType().isVector())
return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
return V;
}
static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
SDLoc DL(N);
// Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
(VT == MVT::f64 && Subtarget.hasSSE2()) ||
(VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
return SDValue();
auto isAllOnesConstantFP = [](SDValue V) {
if (V.getSimpleValueType().isVector())
return ISD::isBuildVectorAllOnes(V.getNode());
auto *C = dyn_cast<ConstantFPSDNode>(V);
return C && C->getConstantFPValue()->isAllOnesValue();
};
// fand (fxor X, -1), Y --> fandn X, Y
if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
// fand X, (fxor Y, -1) --> fandn Y, X
if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
return SDValue();
}
/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// FAND(0.0, x) -> 0.0
if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
return V;
// FAND(x, 0.0) -> 0.0
if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
return V;
if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
return V;
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// FANDN(0.0, x) -> x
if (isNullFPScalarOrVectorConst(N->getOperand(0)))
return N->getOperand(1);
// FANDN(x, 0.0) -> 0.0
if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
return V;
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
// F[X]OR(0.0, x) -> x
if (isNullFPScalarOrVectorConst(N->getOperand(0)))
return N->getOperand(1);
// F[X]OR(x, 0.0) -> x
if (isNullFPScalarOrVectorConst(N->getOperand(1)))
return N->getOperand(0);
if (isFNEG(N))
if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
return NewVal;
return lowerX86FPLogicOp(N, DAG, Subtarget);
}
/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
// Only perform these optimizations when unsafe FP math is enabled.
if (!DAG.getTarget().Options.UnsafeFPMath)
return SDValue();
// In unsafe-math mode we can convert the FMAX and FMIN nodes into FMAXC and
// FMINC, which are commutative operations.
unsigned NewOp = 0;
switch (N->getOpcode()) {
default: llvm_unreachable("unknown opcode");
case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
}
return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
N->getOperand(0), N->getOperand(1));
}
static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (Subtarget.useSoftFloat())
return SDValue();
// TODO: If an operand is already known to be a NaN or not a NaN, this
// should be an optional swap and FMAX/FMIN.
EVT VT = N->getValueType(0);
if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
(Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) ||
(Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))))
return SDValue();
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
SDLoc DL(N);
auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
// If we don't have to respect NaN inputs, this is a direct translation to x86
// min/max instructions.
if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
// If we have to respect NaN inputs, this takes at least 3 instructions.
// Favor a library call when operating on a scalar and minimizing code size.
if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize())
return SDValue();
EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType(
DAG.getDataLayout(), *DAG.getContext(), VT);
// There are 4 possibilities involving NaN inputs, and these are the required
// outputs:
//                  Op1
//              Num     NaN
//           ----------------
//    Num    |  Max  |  Op0 |
// Op0       ----------------
//    NaN    |  Op1  |  NaN |
//           ----------------
//
// The SSE FP max/min instructions were not designed for this case, but rather
// to implement:
// Min = Op1 < Op0 ? Op1 : Op0
// Max = Op1 > Op0 ? Op1 : Op0
//
// So they always return Op0 if either input is a NaN. However, we can still
// use those instructions for fmaxnum by selecting away a NaN input.
// If either operand is NaN, the 2nd source operand (Op0) is passed through.
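// Worked example (values assumed) for fmaxnum with Op0 = NaN, Op1 = 42.0:
// MinOrMax = FMAX(Op1, Op0) passes through Op0 = NaN, IsOp0Nan is true, and
// the select below returns Op1 = 42.0, as fmaxnum requires.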
SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
// If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
// are NaN, the NaN value of Op1 is the result.
return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
}
/// Do target-specific dag combines on X86ISD::ANDNP nodes.
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// ANDNP(0, x) -> x
if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
return N->getOperand(1);
// ANDNP(x, 0) -> 0
if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
EVT VT = N->getValueType(0);
// Attempt to recursively combine a bitmask ANDNP with shuffles.
if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(
{Op}, 0, Op, {0}, {}, /*Depth*/ 1,
/*HasVarMask*/ false, DAG, Subtarget))
return Res;
}
return SDValue();
}
static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
// BT ignores high bits in the bit index operand.
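// For example, with a 32-bit index only the low 5 bits are demanded, so a
// redundant (and x, 31) feeding the index can be simplified away.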
unsigned BitWidth = N1.getValueSizeInBits();
APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
return SDValue();
}
// Try to combine sext_in_reg of a cmov of constants by extending the constants.
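// For example (a sketch): (sext_in_reg (i32 cmov C1, C2, cc), i16)
//   --> (i32 cmov (sext_in_reg C1, i16), (sext_in_reg C2, i16), cc)
// where the sext_in_reg of each constant then constant-folds.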
static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
if (ExtraVT != MVT::i16)
return SDValue();
// Look through single use any_extends.
if (N0.getOpcode() == ISD::ANY_EXTEND && N0.hasOneUse())
N0 = N0.getOperand(0);
// See if we have a single use cmov.
if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
return SDValue();
SDValue CMovOp0 = N0.getOperand(0);
SDValue CMovOp1 = N0.getOperand(1);
// Make sure both operands are constants.
if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
!isa<ConstantSDNode>(CMovOp1.getNode()))
return SDValue();
SDLoc DL(N);
// If we looked through an any_extend above, apply the same extension to
// the constants.
if (N0.getValueType() != VT) {
CMovOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, VT, CMovOp0);
CMovOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, VT, CMovOp1);
}
CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, CMovOp0, N1);
CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, CMovOp1, N1);
return DAG.getNode(X86ISD::CMOV, DL, VT, CMovOp0, CMovOp1,
N0.getOperand(2), N0.getOperand(3));
}
static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (SDValue V = combineSextInRegCmov(N, DAG))
return V;
EVT VT = N->getValueType(0);
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
SDLoc dl(N);
// SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
// since there is no sign-extended shift right operation on a vector with
// 64-bit elements.
// (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
//   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
N0.getOpcode() == ISD::SIGN_EXTEND)) {
SDValue N00 = N0.getOperand(0);
// EXTLOAD has a better solution on AVX2: it may be replaced with an
// X86ISD::VSEXT node.
if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
if (!ISD::isNormalLoad(N00.getNode()))
return SDValue();
if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
N00, N1);
return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
}
}
return SDValue();
}
/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
/// Promoting a sign/zero extension ahead of a no-overflow 'add' exposes
/// opportunities to combine math ops, use an LEA, or use a complex addressing
/// mode. This can eliminate extend, add, and shift instructions.
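/// For example (a sketch; registers assumed):
///   (i64 sext (i32 add nsw x, 4)) --> (i64 add (i64 sext x), 4)
/// which, together with another 'add' or 'shl' user, can fold into an LEA
/// such as leaq 4(%rax,%rcx).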
static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
Ext->getOpcode() != ISD::ZERO_EXTEND)
return SDValue();
// TODO: This should be valid for other integer types.
EVT VT = Ext->getValueType(0);
if (VT != MVT::i64)
return SDValue();
SDValue Add = Ext->getOperand(0);
if (Add.getOpcode() != ISD::ADD)
return SDValue();
bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
bool NSW = Add->getFlags().hasNoSignedWrap();
bool NUW = Add->getFlags().hasNoUnsignedWrap();
// We need an 'add nsw' feeding into the 'sext', or an 'add nuw' feeding
// into the 'zext'.
if ((Sext && !NSW) || (!Sext && !NUW))
return SDValue();
// Having a constant operand to the 'add' ensures that we are not increasing
// the instruction count because the constant is extended for free below.
// A constant operand can also become the displacement field of an LEA.
auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
if (!AddOp1)
return SDValue();
// Don't make the 'add' bigger if there's no hope of combining it with some
// other 'add' or 'shl' instruction.
// TODO: It may be profitable to generate simpler LEA instructions in place
// of single 'add' instructions, but the cost model for selecting an LEA
// currently has a high threshold.
bool HasLEAPotential = false;
for (auto *User : Ext->uses()) {
if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
HasLEAPotential = true;
break;
}
}
if (!HasLEAPotential)
return SDValue();
// Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
SDValue AddOp0 = Add.getOperand(0);
SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
// The wider add is guaranteed not to wrap because the narrow add carried
// the matching no-wrap flag, which we propagate here.
SDNodeFlags Flags;
Flags.setNoSignedWrap(NSW);
Flags.setNoUnsignedWrap(NUW);
return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
}
/// (i8,i32 {s/z}ext ({s/u}divrem (i8 x, i8 y))) ->
/// (i8,i32 ({s/u}divrem_sext_hreg (i8 x, i8 y)))
/// This exposes the {s/z}ext to the sdivrem lowering, so that it directly
/// extends from AH (which we otherwise need to do contortions to access).
static SDValue getDivRem8(SDNode *N, SelectionDAG &DAG) {
SDValue N0 = N->getOperand(0);
auto OpcodeN = N->getOpcode();
auto OpcodeN0 = N0.getOpcode();
if (!((OpcodeN == ISD::SIGN_EXTEND && OpcodeN0 == ISD::SDIVREM) ||
(OpcodeN == ISD::ZERO_EXTEND && OpcodeN0 == ISD::UDIVREM)))
return SDValue();
EVT VT = N->getValueType(0);
EVT InVT = N0.getValueType();
if (N0.getResNo() != 1 || InVT != MVT::i8 ||
!(VT == MVT::i32 || VT == MVT::i64))
return SDValue();
SDVTList NodeTys = DAG.getVTList(MVT::i8, MVT::i32);
auto DivRemOpcode = OpcodeN0 == ISD::SDIVREM ? X86ISD::SDIVREM8_SEXT_HREG
: X86ISD::UDIVREM8_ZEXT_HREG;
SDValue R = DAG.getNode(DivRemOpcode, SDLoc(N), NodeTys, N0.getOperand(0),
N0.getOperand(1));
DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
// If this was a 64-bit extend, complete it.
if (VT == MVT::i64)
return DAG.getNode(OpcodeN, SDLoc(N), VT, R.getValue(1));
return R.getValue(1);
}
// If we encounter an {ANY,SIGN,ZERO}_EXTEND applied to a CMOV with constant
// operands, and the result of the CMOV is not used anywhere else, promote
// the CMOV itself instead of promoting its result. This could be beneficial
// because:
// 1) X86TargetLowering::EmitLoweredSelect can later merge two (or more)
//    pseudo-CMOVs only when they go one after another, and getting rid of
//    the result-extension code after the CMOV helps with that.
// 2) Promotion of constant CMOV arguments is free, so the
//    {ANY,SIGN,ZERO}_EXTEND will simply be deleted.
// 3) A 16-bit CMOV encoding is 4 bytes and a 32-bit CMOV is 3 bytes, so this
//    promotion is also good for code size.
//    (A 64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
//    promotion.)
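// For example, (i32 zext (i16 cmov C1, C2, cc)) becomes
// (i32 cmov C1', C2', cc) with zero-extended constants C1'/C2', and the
// original zext is deleted.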
static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
SDValue CMovN = Extend->getOperand(0);
if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
return SDValue();
EVT TargetVT = Extend->getValueType(0);
unsigned ExtendOpcode = Extend->getOpcode();
SDLoc DL(Extend);
EVT VT = CMovN.getValueType();
SDValue CMovOp0 = CMovN.getOperand(0);
SDValue CMovOp1 = CMovN.getOperand(1);
if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
!isa<ConstantSDNode>(CMovOp1.getNode()))
return SDValue();
// Only extend to i32 or i64.
if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
return SDValue();
// Only extend from i16, unless it's a sign_extend from i32. Zext/aext from
// i32 are free.
if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
return SDValue();
// If this is a zero or any extend to i64, we should only extend to i32 and
// use a free implicit zero extend to finish.
EVT ExtendVT = TargetVT;
if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
ExtendVT = MVT::i32;
CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
CMovN.getOperand(2), CMovN.getOperand(3));
// Finish extending if needed.
if (ExtendVT != TargetVT)
Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
return Res;
}
// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
// This is more or less the reverse of combineBitcastvxi1.
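// For example (a sketch): (v8i16 sext (v8i1 bitcast (i8 x))) broadcasts x to
// all 8 lanes, ANDs lane i with (1 << i), compares against the same bit mask
// with SETEQ, and sign-extends, so lane i becomes -1 iff bit i of x is set.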
static SDValue
combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned Opcode = N->getOpcode();
if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
Opcode != ISD::ANY_EXTEND)
return SDValue();
if (!DCI.isBeforeLegalizeOps())
return SDValue();
if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
return SDValue();
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
EVT InSVT = N0.getValueType().getScalarType();
unsigned EltSizeInBits = SVT.getSizeInBits();
// Input type must be extending a bool vector (bit-casted from a scalar
// integer) to legal integer types.
if (!VT.isVector())
return SDValue();
if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
return SDValue();
if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
return SDValue();
SDValue N00 = N0.getOperand(0);
EVT SclVT = N0.getOperand(0).getValueType();
if (!SclVT.isScalarInteger())
return SDValue();
SDLoc DL(N);
SDValue Vec;
SmallVector<int, 32> ShuffleMask;
unsigned NumElts = VT.getVectorNumElements();
assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
// Broadcast the scalar integer to the vector elements.
if (NumElts > EltSizeInBits) {
// If the scalar integer is greater than the vector element size, then we
// must split it down into sub-sections for broadcasting. For example:
// i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
// i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
unsigned Scale = NumElts / EltSizeInBits;
EVT BroadcastVT =
EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
Vec = DAG.getBitcast(VT, Vec);
for (unsigned i = 0; i != Scale; ++i)
ShuffleMask.append(EltSizeInBits, i);
} else {
// For smaller scalar integers, we can simply any-extend it to the vector
// element size (we don't care about the upper bits) and broadcast it to all
// elements.
SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
ShuffleMask.append(NumElts, 0);
}
Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
// Now, mask the relevant bit in each element.
SmallVector<SDValue, 32> Bits;
for (unsigned i = 0; i != NumElts; ++i) {
int BitIdx = (i % EltSizeInBits);
APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
Bits.push_back(DAG.getConstant(Bit, DL, SVT));
}
SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
// Compare against the bitmask and extend the result.
EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
// For SEXT we are now done; otherwise, shift the result down for
// zero-extension.
if (Opcode == ISD::SIGN_EXTEND)
return Vec;
return DAG.getNode(ISD::SRL, DL, VT, Vec,
DAG.getConstant(EltSizeInBits - 1, DL, VT));
}
/// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or
/// ZERO_EXTEND_VECTOR_INREG. This requires splitting the input (or
/// concatenating it with UNDEFs) into vectors of the same size as the target
/// type, which then extend their lowest elements.
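/// For example (a sketch): on SSE4.1, (v2i64 sext (v2i32 x)) concatenates x
/// with an UNDEF v2i32 into a 128-bit v4i32, then emits
/// (v2i64 sign_extend_vector_inreg v4i32), which lowers to PMOVSXDQ.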
static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
unsigned Opcode = N->getOpcode();
if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND)
return SDValue();
if (!DCI.isBeforeLegalizeOps())
return SDValue();
if (!Subtarget.hasSSE2())
return SDValue();
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT SVT = VT.getScalarType();
EVT InVT = N0.getValueType();
EVT InSVT = InVT.getScalarType();
// Input type must be a vector and we must be extending legal integer types.
if (!VT.isVector())
return SDValue();
if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
return SDValue();
if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
return SDValue();
// On AVX2+ targets, if the input/output types are both legal then we will be
// able to use SIGN_EXTEND/ZERO_EXTEND directly.
if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
DAG.getTargetLoweringInfo().isTypeLegal(InVT))
return SDValue();
SDLoc DL(N);
auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) {
EVT InVT = N.getValueType();
EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(),
Size / InVT.getScalarSizeInBits());
SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(),
DAG.getUNDEF(InVT));
Opnds[0] = N;
return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds);
};
// If the target size is less than 128 bits, widen to a type that would
// extend to 128 bits, extend that, and extract the original target vector.
if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) {
unsigned Scale = 128 / VT.getSizeInBits();
EVT ExVT =
EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits());
SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits());
SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex);
return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt,
DAG.getIntPtrConstant(0, DL));
}
// If the target size is 128 bits (or 256 bits on an AVX2 target), then
// convert to ISD::*_EXTEND_VECTOR_INREG, which ensures lowering to
// X86ISD::V*EXT. Also use this if we don't have SSE41, to let the legalizer
// do its job.
if (!Subtarget.hasSSE41() || VT.is128BitVector() ||
(VT.is256BitVector() && Subtarget.hasInt256()) ||
(VT.is512BitVector() && Subtarget.useAVX512Regs())) {
SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits());
return Opcode == ISD::SIGN_EXTEND
? DAG.getSignExtendVectorInReg(ExOp, DL, VT)
: DAG.getZeroExtendVectorInReg(ExOp, DL, VT);
}
auto SplitAndExtendInReg = [&](unsigned SplitSize) {
unsigned NumVecs = VT.getSizeInBits() / SplitSize;
unsigned NumSubElts = SplitSize / SVT.getSizeInBits();
EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts);
EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts);
SmallVector<SDValue, 8> Opnds;
for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) {
SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0,
DAG.getIntPtrConstant(Offset, DL));
SrcVec = ExtendVecSize(DL, SrcVec, SplitSize);
SrcVec = Opcode == ISD::SIGN_EXTEND
? DAG.getSignExtendVectorInReg(SrcVec, DL, SubVT)
: DAG.getZeroExtendVectorInReg(SrcVec, DL, SubVT);
Opnds.push_back(SrcVec);
}
return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds);
};
// On pre-AVX2 targets, split into 128-bit nodes of
// ISD::*_EXTEND_VECTOR_INREG.
if (!Subtarget.hasInt256() && !(VT.getSizeInBits() % 128))
return SplitAndExtendInReg(128);
// On pre-AVX512 targets, split into 256-bit nodes of
// ISD::*_EXTEND_VECTOR_INREG.
if (!Subtarget.useAVX512Regs() && !(VT.getSizeInBits() % 256))
return SplitAndExtendInReg(256);
return SDValue();
}
// Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
// result type.
static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
SDLoc dl(N);
// Only do this combine with AVX512 for vector extends.
if (!Subtarget.hasAVX512() || !VT.isVector() || N0->getOpcode() != ISD::SETCC)
return SDValue();
// Only combine legal element types.
EVT SVT = VT.getVectorElementType();
if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
return SDValue();
// We can only do this if the vector size is 256 bits or less.
unsigned Size = VT.getSizeInBits();
if (Size > 256)
return SDValue();
// Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
// those are the only integer compares we have.
ISD::CondCode CC = cast<CondCodeSDNode>(N0->getOperand(2))->get();
if (ISD::isUnsignedIntSetCC(CC))
return SDValue();
// Only do this combine if the extension will be fully consumed by the setcc.
EVT N00VT = N0.getOperand(0).getValueType();
EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
if (Size != MatchingVecType.getSizeInBits())
return SDValue();
SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
if (N->getOpcode() == ISD::ZERO_EXTEND)
Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());
return Res;
}
static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = N0.getValueType();
SDLoc DL(N);
if (SDValue DivRem8 = getDivRem8(N, DAG))
return DivRem8;
if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
return NewCMov;
if (!DCI.isBeforeLegalizeOps())
return SDValue();
if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
return V;
if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
// Inverting and sign-extending a boolean is the same as zero-extending and
// subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract is
// efficiently lowered with an LEA or a DEC. This is the same as:
// select Bool, 0, -1.
// sext (xor Bool, -1) --> sub (zext Bool), 1
SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
}
if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (VT.isVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, Subtarget))
return R;
if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
return NewAdd;
return SDValue();
}
static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) {
if (NegMul) {
switch (Opcode) {
default: llvm_unreachable("Unexpected opcode");
case ISD::FMA: Opcode = X86ISD::FNMADD; break;
case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
case X86ISD::FNMADD: Opcode = ISD::FMA; break;
case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
}
}
if (NegAcc) {
switch (Opcode) {
default: llvm_unreachable("Unexpected opcode");
case ISD::FMA: Opcode = X86ISD::FMSUB; break;
case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
case X86ISD::FMSUB: Opcode = ISD::FMA; break;
case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
}
}
return Opcode;
}
static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
// Let legalize expand this if it isn't a legal type yet.
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
return SDValue();
EVT ScalarVT = VT.getScalarType();
if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
return SDValue();
SDValue A = N->getOperand(0);
SDValue B = N->getOperand(1);
SDValue C = N->getOperand(2);
auto invertIfNegative = [&DAG](SDValue &V) {
if (SDValue NegVal = isFNEG(V.getNode())) {
V = DAG.getBitcast(V.getValueType(), NegVal);
return true;
}
// Look through extract_vector_elts. If it comes from an FNEG, create a
// new extract from the FNEG input.
if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
isNullConstant(V.getOperand(1))) {
if (SDValue NegVal = isFNEG(V.getOperand(0).getNode())) {
NegVal = DAG.getBitcast(V.getOperand(0).getValueType(), NegVal);
V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
NegVal, V.getOperand(1));
return true;
}
}
return false;
};
// Do not convert the passthru input of scalar intrinsics.
// FIXME: We could allow negations of the lower element only.
bool NegA = invertIfNegative(A);
bool NegB = invertIfNegative(B);
bool NegC = invertIfNegative(C);
if (!NegA && !NegB && !NegC)
return SDValue();
unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC);
if (N->getNumOperands() == 4)
return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
return DAG.getNode(NewOpcode, dl, VT, A, B, C);
}
// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc dl(N);
EVT VT = N->getValueType(0);
SDValue NegVal = isFNEG(N->getOperand(2).getNode());
if (!NegVal)
return SDValue();
unsigned NewOpcode;
switch (N->getOpcode()) {
default: llvm_unreachable("Unexpected opcode!");
case X86ISD::FMADDSUB: NewOpcode = X86ISD::FMSUBADD; break;
case X86ISD::FMADDSUB_RND: NewOpcode = X86ISD::FMSUBADD_RND; break;
case X86ISD::FMSUBADD: NewOpcode = X86ISD::FMADDSUB; break;
case X86ISD::FMSUBADD_RND: NewOpcode = X86ISD::FMADDSUB_RND; break;
}
if (N->getNumOperands() == 4)
return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
NegVal, N->getOperand(3));
return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
NegVal);
}
static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
// (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
// (and (i32 x86isd::setcc_carry), 1)
// This eliminates the zext. This transformation is necessary because
// ISD::SETCC is always legalized to i8.
SDLoc dl(N);
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
if (N0.getOpcode() == ISD::AND &&
N0.hasOneUse() &&
N0.getOperand(0).hasOneUse()) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
if (!isOneConstant(N0.getOperand(1)))
return SDValue();
return DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
N00.getOperand(0), N00.getOperand(1)),
DAG.getConstant(1, dl, VT));
}
}
if (N0.getOpcode() == ISD::TRUNCATE &&
N0.hasOneUse() &&
N0.getOperand(0).hasOneUse()) {
SDValue N00 = N0.getOperand(0);
if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
return DAG.getNode(ISD::AND, dl, VT,
DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
N00.getOperand(0), N00.getOperand(1)),
DAG.getConstant(1, dl, VT));
}
}
if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
return NewCMov;
if (DCI.isBeforeLegalizeOps())
if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
return V;
if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
return V;
if (VT.isVector())
if (SDValue R = WidenMaskArithmetic(N, DAG, Subtarget))
return R;
if (SDValue DivRem8 = getDivRem8(N, DAG))
return DivRem8;
if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
return NewAdd;
if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
return R;
return SDValue();
}
/// Try to map a 128-bit or larger integer comparison to vector instructions
/// before type legalization splits it up into chunks.
static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
// We're looking for an oversized integer equality comparison.
SDValue X = SetCC->getOperand(0);
SDValue Y = SetCC->getOperand(1);
EVT OpVT = X.getValueType();
unsigned OpSize = OpVT.getSizeInBits();
if (!OpVT.isScalarInteger() || OpSize < 128)
return SDValue();
// Ignore a comparison with zero because that gets special treatment in
// EmitTest(). But make an exception for the special case of a pair of
// logically-combined vector-sized operands compared to zero. This pattern may
// be generated by the memcmp expansion pass with oversized integer compares
// (see PR33325).
bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
X.getOperand(0).getOpcode() == ISD::XOR &&
X.getOperand(1).getOpcode() == ISD::XOR;
if (isNullConstant(Y) && !IsOrXorXorCCZero)
return SDValue();
// Bail out if we know that this is not really just an oversized integer.
if (peekThroughBitcasts(X).getValueType() == MVT::f128 ||
peekThroughBitcasts(Y).getValueType() == MVT::f128)
return SDValue();
// TODO: Use PXOR + PTEST for SSE4.1 or later?
// TODO: Add support for AVX-512.
EVT VT = SetCC->getValueType(0);
SDLoc DL(SetCC);
if ((OpSize == 128 && Subtarget.hasSSE2()) ||
(OpSize == 256 && Subtarget.hasAVX2())) {
EVT VecVT = OpSize == 128 ? MVT::v16i8 : MVT::v32i8;
SDValue Cmp;
if (IsOrXorXorCCZero) {
// This is a bitwise-combined equality comparison of 2 pairs of vectors:
// setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
// Use 2 vector equality compares and 'and' the results before doing a
// MOVMSK.
SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
SDValue Cmp1 = DAG.getSetCC(DL, VecVT, A, B, ISD::SETEQ);
SDValue Cmp2 = DAG.getSetCC(DL, VecVT, C, D, ISD::SETEQ);
Cmp = DAG.getNode(ISD::AND, DL, VecVT, Cmp1, Cmp2);
} else {
SDValue VecX = DAG.getBitcast(VecVT, X);
SDValue VecY = DAG.getBitcast(VecVT, Y);
Cmp = DAG.getSetCC(DL, VecVT, VecX, VecY, ISD::SETEQ);
}
// If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
// setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
// setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
// setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
// setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
MVT::i32);
return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
}
return SDValue();
}
static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
EVT VT = N->getValueType(0);
EVT OpVT = LHS.getValueType();
SDLoc DL(N);
if (CC == ISD::SETNE || CC == ISD::SETEQ) {
// 0-x == y --> x+y == 0
// 0-x != y --> x+y != 0
if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
LHS.hasOneUse()) {
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
// x == 0-y --> x+y == 0
// x != 0-y --> x+y != 0
if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
RHS.hasOneUse()) {
SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
}
if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
return V;
}
if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
(CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
// Put build_vectors on the right.
if (LHS.getOpcode() == ISD::BUILD_VECTOR) {
std::swap(LHS, RHS);
CC = ISD::getSetCCSwappedOperands(CC);
}
bool IsSEXT0 =
(LHS.getOpcode() == ISD::SIGN_EXTEND) &&
(LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
if (IsSEXT0 && IsVZero1) {
assert(VT == LHS.getOperand(0).getValueType() &&
"Unexpected operand type");
if (CC == ISD::SETGT)
return DAG.getConstant(0, DL, VT);
if (CC == ISD::SETLE)
return DAG.getConstant(1, DL, VT);
if (CC == ISD::SETEQ || CC == ISD::SETGE)
return DAG.getNOT(DL, LHS.getOperand(0), VT);
assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
"Unexpected condition code!");
return LHS.getOperand(0);
}
}
// If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
// pre-promote its result type since vXi1 vectors don't get promoted
// during type legalization.
// NOTE: The element count check is to ignore operand types that need to
// go through type promotion to a 128-bit vector.
if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() > 4 &&
(OpVT.getVectorElementType() == MVT::i8 ||
OpVT.getVectorElementType() == MVT::i16)) {
SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS,
N->getOperand(2));
return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
}
// For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
// to avoid scalarization via legalization because v4i32 is not a legal type.
if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
LHS.getValueType() == MVT::v4f32)
return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
return SDValue();
}
static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue Src = N->getOperand(0);
MVT SrcVT = Src.getSimpleValueType();
// Perform constant folding.
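// For example, (i32 movmsk (v4i32 <-1, 0, -1, 0>)) folds to 0b0101 = 5,
// setting one bit per negative (sign-bit-set) element.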
if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
assert(N->getValueType(0) == MVT::i32 && "Unexpected result type");
APInt Imm(32, 0);
for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
SDValue In = Src.getOperand(Idx);
if (!In.isUndef() &&
cast<ConstantSDNode>(In)->getAPIntValue().isNegative())
Imm.setBit(Idx);
}
return DAG.getConstant(Imm, SDLoc(N), N->getValueType(0));
}
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
// MOVMSK only uses the MSB from each vector element.
KnownBits Known;
APInt DemandedMask(APInt::getSignMask(SrcVT.getScalarSizeInBits()));
if (TLI.SimplifyDemandedBits(Src, DemandedMask, Known, TLO)) {
DCI.AddToWorklist(Src.getNode());
DCI.CommitTargetLoweringOpt(TLO);
return SDValue(N, 0);
}
return SDValue();
}
static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
if (DCI.isBeforeLegalizeOps()) {
SDValue Index = N->getOperand(4);
// Remove any sign extends from 32 or smaller to larger than 32.
// Only do this before LegalizeOps in case we need the sign extend for
// legalization.
if (Index.getOpcode() == ISD::SIGN_EXTEND) {
if (Index.getScalarValueSizeInBits() > 32 &&
Index.getOperand(0).getScalarValueSizeInBits() <= 32) {
SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
NewOps[4] = Index.getOperand(0);
SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
if (Res == N) {
// The original sign extend now has fewer users; add it back to the
// worklist in case it needs to be removed.
DCI.AddToWorklist(Index.getNode());
DCI.AddToWorklist(N);
}
return SDValue(Res, 0);
}
}
// Make sure the index is either i32 or i64.
unsigned ScalarSize = Index.getScalarValueSizeInBits();
if (ScalarSize != 32 && ScalarSize != 64) {
MVT EltVT = ScalarSize > 32 ? MVT::i64 : MVT::i32;
EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
Index.getValueType().getVectorNumElements());
Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
NewOps[4] = Index;
SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
if (Res == N)
DCI.AddToWorklist(N);
return SDValue(Res, 0);
}
// Try to remove zero extends from 32->64 if we know the sign bit of
// the input is zero.
if (Index.getOpcode() == ISD::ZERO_EXTEND &&
Index.getScalarValueSizeInBits() == 64 &&
Index.getOperand(0).getScalarValueSizeInBits() == 32) {
if (DAG.SignBitIsZero(Index.getOperand(0))) {
SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end());
NewOps[4] = Index.getOperand(0);
SDNode *Res = DAG.UpdateNodeOperands(N, NewOps);
if (Res == N) {
// The original zero extend now has fewer users; add it back to the
// worklist in case it needs to be removed.
DCI.AddToWorklist(Index.getNode());
DCI.AddToWorklist(N);
}
return SDValue(Res, 0);
}
}
}
// Without AVX512, gathers and scatters use vector masks of which only the
// sign bit of each element is demanded.
if (!Subtarget.hasAVX512()) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
SDValue Mask = N->getOperand(2);
KnownBits Known;
APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
if (TLI.SimplifyDemandedBits(Mask, DemandedMask, Known, TLO)) {
DCI.AddToWorklist(Mask.getNode());
DCI.CommitTargetLoweringOpt(TLO);
return SDValue(N, 0);
}
}
return SDValue();
}
// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
SDValue EFLAGS = N->getOperand(1);
// Try to simplify the EFLAGS and condition code operands.
if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
return getSETCC(CC, Flags, DL, DAG);
return SDValue();
}
/// Optimize branch condition evaluation.
static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDLoc DL(N);
SDValue EFLAGS = N->getOperand(3);
X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
// Try to simplify the EFLAGS and condition code operands.
// Make sure to not keep references to operands, as combineSetCCEFLAGS can
// RAUW them under us.
if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
N->getOperand(1), Cond, Flags);
}
return SDValue();
}
static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
SelectionDAG &DAG) {
// Take advantage of vector comparisons producing 0 or -1 in each lane to
// optimize away the operation when its input is a compare masked with a
// constant.
//
// The general transformation is:
// UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
// AND(VECTOR_CMP(x,y), constant2)
// constant2 = UNARYOP(constant)
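// For example (a sketch with assumed types), with UNARYOP = sint_to_fp:
//   (v4f32 sint_to_fp (v4i32 and (setcc x, y), (splat 1)))
//     --> (v4f32 bitcast (and (setcc x, y), (bitcast (v4f32 splat 1.0))))
// since each lane of the compare is all-zeros or all-ones.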
// Early exit if this isn't a vector operation, the operand of the
// unary operation isn't a bitwise AND, or if the sizes of the operations
// aren't the same.
EVT VT = N->getValueType(0);
if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
return SDValue();
// Now check that the other operand of the AND is a constant. We could
// make the transformation for non-constant splats as well, but it's unclear
// that would be a benefit as it would not eliminate any operations, just
// perform one more step in scalar code before moving to the vector unit.
if (BuildVectorSDNode *BV =
dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
// Bail out if the vector isn't a constant.
if (!BV->isConstant())
return SDValue();
// Everything checks out. Build up the new and improved node.
SDLoc DL(N);
EVT IntVT = BV->getValueType(0);
// Create a new constant of the appropriate type for the transformed
// DAG.
SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
// The AND node needs bitcasts to/from an integer vector type around it.
SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
N->getOperand(0)->getOperand(0), MaskConst);
SDValue Res = DAG.getBitcast(VT, NewAnd);
return Res;
}
return SDValue();
}
static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = Op0.getValueType();
// UINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
// UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
// UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
SDLoc dl(N);
EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements());
SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
// UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
}
// Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
// optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
// that optimization here.
if (DAG.SignBitIsZero(Op0))
return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
return SDValue();
}
static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
// First try to optimize away the conversion entirely when it's
// conditionally from a constant. Vectors only.
if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
return Res;
// Now move on to more general possibilities.
SDValue Op0 = N->getOperand(0);
EVT VT = N->getValueType(0);
EVT InVT = Op0.getValueType();
// SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
// SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
// SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
SDLoc dl(N);
EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements());
SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
}
// Without AVX512DQ we only support i64 to float scalar conversion. For both
// vectors and scalars, see if we know that the upper bits are all the sign
// bit, in which case we can truncate the input to i32 and convert from that.
if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
unsigned BitWidth = InVT.getScalarSizeInBits();
unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
if (NumSignBits >= (BitWidth - 31)) {
EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), 32);
if (InVT.isVector())
TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
InVT.getVectorNumElements());
SDLoc dl(N);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
}
}
// Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
// a 32-bit target where SSE doesn't support i64->FP operations.
if (!Subtarget.useSoftFloat() && Op0.getOpcode() == ISD::LOAD) {
LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
EVT LdVT = Ld->getValueType(0);
// This transformation is not supported if the result type is f16 or f128.
if (VT == MVT::f16 || VT == MVT::f128)
return SDValue();
// If we have AVX512DQ we can use packed conversion instructions unless
// the VT is f80.
if (Subtarget.hasDQI() && VT != MVT::f80)
return SDValue();
if (!Ld->isVolatile() && !VT.isVector() &&
ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
!Subtarget.is64Bit() && LdVT == MVT::i64) {
SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
return FILDChain;
}
}
return SDValue();
}
static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
MVT VT = N->getSimpleValueType(0);
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
N->getOperand(0), N->getOperand(1),
Flags);
}
return SDValue();
}
// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
// If the LHS and RHS of the ADC node are zero, then it can't overflow and
// the result is either zero or one (depending on the input carry bit).
// Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
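// i.e. (ADC 0, 0, EFLAGS) becomes (AND (SETCC_CARRY EFLAGS), 1), which is
// 1 when CF is set and 0 otherwise.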
if (X86::isZeroNode(N->getOperand(0)) &&
X86::isZeroNode(N->getOperand(1)) &&
// We don't have a good way to replace an EFLAGS use, so for now only do
// this when the EFLAGS result is dead.
SDValue(N, 1).use_empty()) {
SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL,
MVT::i8),
N->getOperand(2)),
DAG.getConstant(1, DL, VT));
return DCI.CombineTo(N, Res1, CarryOut);
}
if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) {
MVT VT = N->getSimpleValueType(0);
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
N->getOperand(0), N->getOperand(1),
Flags);
}
return SDValue();
}
/// Materialize "setb reg" as "sbb reg,reg", since it produces an all-ones bit
/// which is more useful than 0/1 in some cases.
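/// For example, "sbb %eax, %eax" computes EAX - EAX - CF, yielding 0 or -1
/// depending on the carry flag, whereas "setb %al" only yields 0 or 1.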
static SDValue materializeSBB(SDNode *N, SDValue EFLAGS, SelectionDAG &DAG) {
SDLoc DL(N);
// "Condition code B" is also known as "the carry flag" (CF).
SDValue CF = DAG.getConstant(X86::COND_B, DL, MVT::i8);
SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8, CF, EFLAGS);
MVT VT = N->getSimpleValueType(0);
if (VT == MVT::i8)
return DAG.getNode(ISD::AND, DL, VT, SBB, DAG.getConstant(1, DL, VT));
assert(VT == MVT::i1 && "Unexpected type for SETCC node");
return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SBB);
}
/// If this is an add or subtract where one operand is produced by a cmp+setcc,
/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
/// with CMP+{ADC, SBB}.
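/// For example, "x + (z != 0)" can then lower to "cmpl $1, %ecx; sbbl $-1,
/// %eax" instead of materializing the setcc result in a register first.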
static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
bool IsSub = N->getOpcode() == ISD::SUB;
SDValue X = N->getOperand(0);
SDValue Y = N->getOperand(1);
// If this is an add, canonicalize a zext operand to the RHS.
// TODO: Incomplete? What if both sides are zexts?
if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
Y.getOpcode() != ISD::ZERO_EXTEND)
std::swap(X, Y);
// Look through a one-use zext.
bool PeekedThroughZext = false;
if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
Y = Y.getOperand(0);
PeekedThroughZext = true;
}
// If this is an add, canonicalize a setcc operand to the RHS.
// TODO: Incomplete? What if both sides are setcc?
// TODO: Should we allow peeking through a zext of the other operand?
if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
Y.getOpcode() != X86ISD::SETCC)
std::swap(X, Y);
if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
return SDValue();
SDLoc DL(N);
EVT VT = N->getValueType(0);
X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
// If X is -1 or 0, then we have an opportunity to avoid constants required in
// the general case below.
auto *ConstantX = dyn_cast<ConstantSDNode>(X);
if (ConstantX) {
if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
(IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
// This is a complicated way to get -1 or 0 from the carry flag:
// -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
// 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
Y.getOperand(1));
}
if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
(IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
SDValue EFLAGS = Y->getOperand(1);
if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
EFLAGS.getValueType().isInteger() &&
!isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
// Swap the operands of a SUB, and we have the same pattern as above.
// -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
// 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
SDValue NewSub = DAG.getNode(
X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
EFLAGS.getOperand(1), EFLAGS.getOperand(0));
SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
NewEFLAGS);
}
}
}
if (CC == X86::COND_B) {
// X + SETB Z --> X + (mask SBB Z, Z)
// X - SETB Z --> X - (mask SBB Z, Z)
// TODO: Produce ADC/SBB here directly and avoid SETCC_CARRY?
SDValue SBB = materializeSBB(Y.getNode(), Y.getOperand(1), DAG);
if (SBB.getValueSizeInBits() != VT.getSizeInBits())
SBB = DAG.getZExtOrTrunc(SBB, DL, VT);
return DAG.getNode(IsSub ? ISD::SUB : ISD::ADD, DL, VT, X, SBB);
}
if (CC == X86::COND_A) {
SDValue EFLAGS = Y->getOperand(1);
// Try to convert COND_A into COND_B in an attempt to facilitate
// materializing "setb reg".
//
// Do not flip "e > c", where "c" is a constant, because Cmp instruction
// cannot take an immediate as its first operand.
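// e.g. do not rewrite (SUB %reg, 42) as (SUB 42, %reg); SUB/CMP cannot
// encode an immediate in the first operand position.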
//
if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
EFLAGS.getValueType().isInteger() &&
!isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
EFLAGS.getNode()->getVTList(),
EFLAGS.getOperand(1), EFLAGS.getOperand(0));
SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
SDValue SBB = materializeSBB(Y.getNode(), NewEFLAGS, DAG);
if (SBB.getValueSizeInBits() != VT.getSizeInBits())
SBB = DAG.getZExtOrTrunc(SBB, DL, VT);
return DAG.getNode(IsSub ? ISD::SUB : ISD::ADD, DL, VT, X, SBB);
}
}
if (CC != X86::COND_E && CC != X86::COND_NE)
return SDValue();
SDValue Cmp = Y.getOperand(1);
if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
!X86::isZeroNode(Cmp.getOperand(1)) ||
!Cmp.getOperand(0).getValueType().isInteger())
return SDValue();
SDValue Z = Cmp.getOperand(0);
EVT ZVT = Z.getValueType();
// If X is -1 or 0, then we have an opportunity to avoid constants required in
// the general case below.
if (ConstantX) {
// 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
// fake operands:
// 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
// -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
(!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
SDValue Zero = DAG.getConstant(0, DL, ZVT);
SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
SDValue(Neg.getNode(), 1));
}
// cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
// with fake operands:
// 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
// -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
(!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
SDValue One = DAG.getConstant(1, DL, ZVT);
SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp1);
}
}
// (cmp Z, 1) sets the carry flag if Z is 0.
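// (CMP computes Z - 1, and the borrow (CF) is set iff Z < 1 unsigned,
// i.e. iff Z == 0.)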
SDValue One = DAG.getConstant(1, DL, ZVT);
SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
// Add the flags type for ADC/SBB nodes.
SDVTList VTs = DAG.getVTList(VT, MVT::i32);
// X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
// X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
if (CC == X86::COND_NE)
return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
DAG.getConstant(-1ULL, DL, VT), Cmp1);
// X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
// X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
DAG.getConstant(0, DL, VT), Cmp1);
}
static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
SDValue MulOp = N->getOperand(0);
SDValue Phi = N->getOperand(1);
if (MulOp.getOpcode() != ISD::MUL)
std::swap(MulOp, Phi);
if (MulOp.getOpcode() != ISD::MUL)
return SDValue();
ShrinkMode Mode;
if (!canReduceVMulWidth(MulOp.getNode(), DAG, Mode) || Mode == MULU16)
return SDValue();
EVT VT = N->getValueType(0);
// Do not use PMADD if the vector has fewer than 8 elements; it would be
// narrower than a 128-bit register.
if (VT.getVectorNumElements() < 8)
return SDValue();
SDLoc DL(N);
EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
VT.getVectorNumElements());
EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
VT.getVectorNumElements() / 2);
// Shrink the operands of mul.
SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));
// Madd vector size is half of the original vector size
auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
return DAG.getNode(X86ISD::VPMADDWD, DL, VT, Ops);
};
SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
PMADDWDBuilder);
// Fill the rest of the output with 0
SDValue Zero = getZeroVector(Madd.getSimpleValueType(), Subtarget, DAG, DL);
SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);
return DAG.getNode(ISD::ADD, DL, VT, Concat, Phi);
}
static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
// TODO: There's nothing special about i32, any integer type above i16 should
// work just as well.
if (!VT.isVector() || !VT.isSimple() ||
!(VT.getVectorElementType() == MVT::i32))
return SDValue();
unsigned RegSize = 128;
if (Subtarget.useBWIRegs())
RegSize = 512;
else if (Subtarget.hasAVX())
RegSize = 256;
// We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
// TODO: We should be able to handle larger vectors by splitting them before
// feeding them into several SADs, and then reducing over those.
if (VT.getSizeInBits() / 4 > RegSize)
return SDValue();
// We know N is a reduction add, which means one of its operands is a phi.
// To match SAD, we need the other operand to be a vector select.
SDValue SelectOp, Phi;
if (Op0.getOpcode() == ISD::VSELECT) {
SelectOp = Op0;
Phi = Op1;
} else if (Op1.getOpcode() == ISD::VSELECT) {
SelectOp = Op1;
Phi = Op0;
} else
return SDValue();
// Check whether we have an abs-diff pattern feeding into the select.
if (!detectZextAbsDiff(SelectOp, Op0, Op1))
return SDValue();
// SAD pattern detected. Now build a SAD instruction and an addition for
// reduction. Note that the number of elements of the result of SAD is less
// than the number of elements of its input. Therefore, we can only update
// part of the elements in the reduction vector.
SDValue Sad = createPSADBW(DAG, Op0, Op1, DL, Subtarget);
// The output of PSADBW is a vector of i64.
// We need to turn the vector of i64 into a vector of i32.
// If the reduction vector is at least as wide as the psadbw result, just
// bitcast. If it's narrower, truncate - the high i32 of each i64 is zero
// anyway.
MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
if (VT.getSizeInBits() >= ResVT.getSizeInBits())
Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
else
Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad);
if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
// Fill the upper elements with zero to match the add width.
SDValue Zero = DAG.getConstant(0, DL, VT);
Sad = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Zero, Sad,
DAG.getIntPtrConstant(0, DL));
}
return DAG.getNode(ISD::ADD, DL, VT, Sad, Phi);
}
/// Convert vector increment or decrement to sub/add with an all-ones constant:
/// add X, <1, 1...> --> sub X, <-1, -1...>
/// sub X, <1, 1...> --> add X, <-1, -1...>
/// The all-ones vector constant can be materialized using a pcmpeq instruction
/// that is commonly recognized as an idiom (has no register dependency), so
/// that's better/smaller than loading a splat 1 constant.
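/// e.g. "pcmpeqd %xmm0, %xmm0" materializes the all-ones vector without a
/// constant-pool load.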
static SDValue combineIncDecVector(SDNode *N, SelectionDAG &DAG) {
assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Unexpected opcode for increment/decrement transform");
// Pseudo-legality check: getOnesVector() expects one of these types, so bail
// out and wait for legalization if we have an unsupported vector length.
EVT VT = N->getValueType(0);
if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
return SDValue();
SDNode *N1 = N->getOperand(1).getNode();
APInt SplatVal;
if (!ISD::isConstantSplatVector(N1, SplatVal) ||
!SplatVal.isOneValue())
return SDValue();
SDValue AllOnesVec = getOnesVector(VT, DAG, SDLoc(N));
unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec);
}
static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
const SDLoc &DL, EVT VT,
const X86Subtarget &Subtarget) {
// Example of pattern we try to detect:
// t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
//(add (build_vector (extract_elt t, 0),
// (extract_elt t, 2),
// (extract_elt t, 4),
// (extract_elt t, 6)),
// (build_vector (extract_elt t, 1),
// (extract_elt t, 3),
// (extract_elt t, 5),
// (extract_elt t, 7)))
if (!Subtarget.hasSSE2())
return SDValue();
if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
Op1.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
VT.getVectorNumElements() < 4 ||
!isPowerOf2_32(VT.getVectorNumElements()))
return SDValue();
// Check if one of Op0,Op1 is of the form:
// (build_vector (extract_elt Mul, 0),
// (extract_elt Mul, 2),
// (extract_elt Mul, 4),
// ...
// the other is of the form:
// (build_vector (extract_elt Mul, 1),
// (extract_elt Mul, 3),
// (extract_elt Mul, 5),
// ...
// and identify Mul.
SDValue Mul;
for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
// TODO: Be more tolerant to undefs.
if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
if (!Const0L || !Const1L || !Const0H || !Const1H)
return SDValue();
unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
// Commutativity of mul allows factors of a product to reorder.
if (Idx0L > Idx1L)
std::swap(Idx0L, Idx1L);
if (Idx0H > Idx1H)
std::swap(Idx0H, Idx1H);
// Commutativity of add allows pairs of factors to reorder.
if (Idx0L > Idx0H) {
std::swap(Idx0L, Idx0H);
std::swap(Idx1L, Idx1H);
}
if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
Idx1H != 2 * i + 3)
return SDValue();
if (!Mul) {
// First time an extract_elt's source vector is visited. Must be a MUL
// with twice as many vector elements as the BUILD_VECTOR.
// Both extracts must be from the same MUL.
Mul = Op0L->getOperand(0);
if (Mul->getOpcode() != ISD::MUL ||
Mul.getValueType().getVectorNumElements() != 2 * e)
return SDValue();
}
// Check that the extract is from the same MUL previously seen.
if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
return SDValue();
}
// Check if the Mul source can be safely shrunk.
ShrinkMode Mode;
if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16)
return SDValue();
auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Shrink by adding truncate nodes and let DAGCombine fold with the
// sources.
EVT InVT = Ops[0].getValueType();
assert(InVT.getScalarType() == MVT::i32 &&
"Unexpected scalar element type");
assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements() / 2);
EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
InVT.getVectorNumElements());
return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
};
return SplitOpsAndApply(DAG, Subtarget, DL, VT,
{ Mul.getOperand(0), Mul.getOperand(1) },
PMADDBuilder);
}
// Attempt to turn this pattern into PMADDWD.
// (add (mul (sext (build_vector)), (sext (build_vector))),
//      (mul (sext (build_vector)), (sext (build_vector))))
static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
const SDLoc &DL, EVT VT,
const X86Subtarget &Subtarget) {
if (!Subtarget.hasSSE2())
return SDValue();
if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
return SDValue();
if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
VT.getVectorNumElements() < 4 ||
!isPowerOf2_32(VT.getVectorNumElements()))
return SDValue();
SDValue N00 = N0.getOperand(0);
SDValue N01 = N0.getOperand(1);
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
// All inputs need to be sign extends.
// TODO: Support ZERO_EXTEND from known positive?
if (N00.getOpcode() != ISD::SIGN_EXTEND ||
N01.getOpcode() != ISD::SIGN_EXTEND ||
N10.getOpcode() != ISD::SIGN_EXTEND ||
N11.getOpcode() != ISD::SIGN_EXTEND)
return SDValue();
// Peek through the extends.
N00 = N00.getOperand(0);
N01 = N01.getOperand(0);
N10 = N10.getOperand(0);
N11 = N11.getOperand(0);
// Must be extending from vXi16.
EVT InVT = N00.getValueType();
if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
N10.getValueType() != InVT || N11.getValueType() != InVT)
return SDValue();
// All inputs should be build_vectors.
if (N00.getOpcode() != ISD::BUILD_VECTOR ||
N01.getOpcode() != ISD::BUILD_VECTOR ||
N10.getOpcode() != ISD::BUILD_VECTOR ||
N11.getOpcode() != ISD::BUILD_VECTOR)
return SDValue();
// For each result element, we need the even elements of the two input
// vectors multiplied together, plus the odd elements of the two input
// vectors multiplied together. That is, for each element i this operation
// must be performed:
// A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
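// e.g. a v4i32 result built from v8i16 inputs A and B must compute
// A[0]*B[0] + A[1]*B[1] in element 0, A[2]*B[2] + A[3]*B[3] in element 1,
// and so on; this matches the semantics of the PMADDWD instruction.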
SDValue In0, In1;
for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
SDValue N00Elt = N00.getOperand(i);
SDValue N01Elt = N01.getOperand(i);
SDValue N10Elt = N10.getOperand(i);
SDValue N11Elt = N11.getOperand(i);
// TODO: Be more tolerant to undefs.
if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return SDValue();
auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
return SDValue();
unsigned IdxN00 = ConstN00Elt->getZExtValue();
unsigned IdxN01 = ConstN01Elt->getZExtValue();
unsigned IdxN10 = ConstN10Elt->getZExtValue();
unsigned IdxN11 = ConstN11Elt->getZExtValue();
// Add is commutative so indices can be reordered.
if (IdxN00 > IdxN10) {
std::swap(IdxN00, IdxN10);
std::swap(IdxN01, IdxN11);
}
// N0 indices must be the even elements. N1 indices must be the next odd
// elements.
if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
return SDValue();
SDValue N00In = N00Elt.getOperand(0);
SDValue N01In = N01Elt.getOperand(0);
SDValue N10In = N10Elt.getOperand(0);
SDValue N11In = N11Elt.getOperand(0);
// The first time we find an input, capture it.
if (!In0) {
In0 = N00In;
In1 = N01In;
}
// Mul is commutative so the input vectors can be in any order.
// Canonicalize to make the compares easier.
if (In0 != N00In)
std::swap(N00In, N01In);
if (In0 != N10In)
std::swap(N10In, N11In);
if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
return SDValue();
}
auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
// Shrink by adding truncate nodes and let DAGCombine fold with the
// sources.
EVT InVT = Ops[0].getValueType();
assert(InVT.getScalarType() == MVT::i16 &&
"Unexpected scalar element type");
assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
InVT.getVectorNumElements() / 2);
return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
};
return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
PMADDBuilder);
}
static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
const SDNodeFlags Flags = N->getFlags();
if (Flags.hasVectorReduction()) {
if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
return Sad;
if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
return MAdd;
}
EVT VT = N->getValueType(0);
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
return MAdd;
if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
return MAdd;
// Try to synthesize horizontal adds from adds of shuffles.
if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
VT == MVT::v8i32) &&
Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, true)) {
auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
HADDBuilder);
}
if (SDValue V = combineIncDecVector(N, DAG))
return V;
return combineAddOrSubToADCOrSBB(N, DAG);
}
static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
EVT VT = N->getValueType(0);
// PSUBUS is supported starting from SSE2, but the truncation required for
// v8i32 is only worth it with SSSE3 (PSHUFB).
if (!(Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) &&
!(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
!(Subtarget.hasAVX() && (VT == MVT::v32i8 || VT == MVT::v16i16)) &&
!(Subtarget.useBWIRegs() && (VT == MVT::v64i8 || VT == MVT::v32i16 ||
VT == MVT::v16i32 || VT == MVT::v8i64)))
return SDValue();
SDValue SubusLHS, SubusRHS;
// Try to find umax(a,b) - b or a - umin(a,b) patterns; they may be
// converted to subus(a,b).
// TODO: Need to add IR canonicalization for this code.
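// e.g. (sub (umax a, b), b) computes (a >= b) ? a - b : 0, which is exactly
// the unsigned saturating subtract subus(a, b); the same holds for
// (sub a, (umin a, b)).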
if (Op0.getOpcode() == ISD::UMAX) {
SubusRHS = Op1;
SDValue MaxLHS = Op0.getOperand(0);
SDValue MaxRHS = Op0.getOperand(1);
if (MaxLHS == Op1)
SubusLHS = MaxRHS;
else if (MaxRHS == Op1)
SubusLHS = MaxLHS;
else
return SDValue();
} else if (Op1.getOpcode() == ISD::UMIN) {
SubusLHS = Op0;
SDValue MinLHS = Op1.getOperand(0);
SDValue MinRHS = Op1.getOperand(1);
if (MinLHS == Op0)
SubusRHS = MinRHS;
else if (MinRHS == Op0)
SubusRHS = MinLHS;
else
return SDValue();
} else
return SDValue();
auto SUBUSBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::SUBUS, DL, Ops[0].getValueType(), Ops);
};
// PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
// special preprocessing in some cases.
if (VT != MVT::v8i32 && VT != MVT::v16i32 && VT != MVT::v8i64)
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
{ SubusLHS, SubusRHS }, SUBUSBuilder);
// The special preprocessing case can only be applied if the value was
// zero-extended from 16 bits, so we require the first 16 bits to be zero
// for 32-bit values, or the first 48 bits for 64-bit values.
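// e.g. a v8i32 LHS whose upper 16 bits are known zero can be subtracted in
// v8i16 once the RHS has been clamped with the UMIN below.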
KnownBits Known;
DAG.computeKnownBits(SubusLHS, Known);
unsigned NumZeros = Known.countMinLeadingZeros();
if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
return SDValue();
EVT ExtType = SubusLHS.getValueType();
EVT ShrinkedType;
if (VT == MVT::v8i32 || VT == MVT::v8i64)
ShrinkedType = MVT::v8i16;
else
ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
// If SubusLHS is zero-extended, clamp SubusRHS to the truncated width:
// SubusRHS = umin(0xFFF..., SubusRHS).
SDValue SaturationConst =
DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
ShrinkedType.getScalarSizeInBits()),
SDLoc(SubusLHS), ExtType);
SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
SaturationConst);
SDValue NewSubusLHS =
DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
SDValue Psubus =
SplitOpsAndApply(DAG, Subtarget, SDLoc(N), ShrinkedType,
{ NewSubusLHS, NewSubusRHS }, SUBUSBuilder);
// Zero-extend the result; it may be used elsewhere as a 32-bit value. If
// not, the zext and the following trunc will shrink away.
return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
}
static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1);
// X86 can't encode an immediate LHS of a sub. See if we can push the
// negation into a preceding instruction.
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
// If the RHS of the sub is an XOR with one use and a constant, invert the
// immediate. Then add one to the LHS of the sub so we can turn
// X-Y -> X+~Y+1, saving one register.
if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
isa<ConstantSDNode>(Op1.getOperand(1))) {
APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
EVT VT = Op0.getValueType();
SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
Op1.getOperand(0),
DAG.getConstant(~XorC, SDLoc(Op1), VT));
return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
}
}
// Try to synthesize horizontal subs from subs of shuffles.
EVT VT = N->getValueType(0);
if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
VT == MVT::v8i32) &&
Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, false)) {
auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
};
return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
HSUBBuilder);
}
if (SDValue V = combineIncDecVector(N, DAG))
return V;
// Try to create PSUBUS if SUB's argument is max/min
if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
return V;
return combineAddOrSubToADCOrSBB(N, DAG);
}
static SDValue combineVSZext(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalize())
return SDValue();
SDLoc DL(N);
unsigned Opcode = N->getOpcode();
MVT VT = N->getSimpleValueType(0);
MVT SVT = VT.getVectorElementType();
unsigned NumElts = VT.getVectorNumElements();
unsigned EltSizeInBits = SVT.getSizeInBits();
SDValue Op = N->getOperand(0);
MVT OpVT = Op.getSimpleValueType();
MVT OpEltVT = OpVT.getVectorElementType();
unsigned OpEltSizeInBits = OpEltVT.getSizeInBits();
unsigned InputBits = OpEltSizeInBits * NumElts;
// Perform any constant folding.
// FIXME: Reduce constant pool usage and don't fold when OptSize is enabled.
APInt UndefElts;
SmallVector<APInt, 64> EltBits;
if (getTargetConstantBitsFromNode(Op, OpEltSizeInBits, UndefElts, EltBits)) {
APInt Undefs(NumElts, 0);
SmallVector<APInt, 4> Vals(NumElts, APInt(EltSizeInBits, 0));
bool IsZEXT =
(Opcode == X86ISD::VZEXT) || (Opcode == ISD::ZERO_EXTEND_VECTOR_INREG);
for (unsigned i = 0; i != NumElts; ++i) {
if (UndefElts[i]) {
Undefs.setBit(i);
continue;
}
Vals[i] = IsZEXT ? EltBits[i].zextOrTrunc(EltSizeInBits)
: EltBits[i].sextOrTrunc(EltSizeInBits);
}
return getConstVector(Vals, Undefs, VT, DAG, DL);
}
// (vzext (bitcast (vzext x))) -> (vzext x)
// TODO: (vsext (bitcast (vsext x))) -> (vsext x)
SDValue V = peekThroughBitcasts(Op);
if (Opcode == X86ISD::VZEXT && V != Op && V.getOpcode() == X86ISD::VZEXT) {
MVT InnerVT = V.getSimpleValueType();
MVT InnerEltVT = InnerVT.getVectorElementType();
// If the element sizes match exactly, we can just do one larger vzext. This
// is always an exact type match as vzext operates on integer types.
if (OpEltVT == InnerEltVT) {
assert(OpVT == InnerVT && "Types must match for vzext!");
return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
}
// The only other way we can combine them is if only a single element of the
// inner vzext is used in the input to the outer vzext.
if (InnerEltVT.getSizeInBits() < InputBits)
return SDValue();
// In this case, the inner vzext is completely dead because we're going to
// only look at bits inside of the low element. Just do the outer vzext on
// a bitcast of the input to the inner.
return DAG.getNode(X86ISD::VZEXT, DL, VT, DAG.getBitcast(OpVT, V));
}
// Check if we can bypass extracting and re-inserting an element of an input
// vector. Essentially:
// (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
// TODO: Add X86ISD::VSEXT support
if (Opcode == X86ISD::VZEXT &&
V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
SDValue ExtractedV = V.getOperand(0);
SDValue OrigV = ExtractedV.getOperand(0);
if (isNullConstant(ExtractedV.getOperand(1))) {
MVT OrigVT = OrigV.getSimpleValueType();
// Extract a subvector if necessary...
if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
OrigVT.getVectorNumElements() / Ratio);
OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
DAG.getIntPtrConstant(0, DL));
}
Op = DAG.getBitcast(OpVT, OrigV);
return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
}
}
return SDValue();
}
static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
MVT VT = N->getSimpleValueType(0);
SDLoc DL(N);
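// Comparing a vector against itself folds to a constant: equality yields
// all-ones, signed greater-than yields all-zeros.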
if (N->getOperand(0) == N->getOperand(1)) {
if (N->getOpcode() == X86ISD::PCMPEQ)
return getOnesVector(VT, DAG, DL);
if (N->getOpcode() == X86ISD::PCMPGT)
return getZeroVector(VT, Subtarget, DAG, DL);
}
return SDValue();
}
static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
MVT OpVT = N->getSimpleValueType(0);
bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
SDLoc dl(N);
SDValue Vec = N->getOperand(0);
SDValue SubVec = N->getOperand(1);
unsigned IdxVal = N->getConstantOperandVal(2);
MVT SubVecVT = SubVec.getSimpleValueType();
if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
// Inserting zeros into zeros is a nop.
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
return getZeroVector(OpVT, Subtarget, DAG, dl);
// If we're inserting into a zero vector and then into a larger zero vector,
// just insert into the larger zero vector directly.
if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
unsigned Idx2Val = SubVec.getConstantOperandVal(2);
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
getZeroVector(OpVT, Subtarget, DAG, dl),
SubVec.getOperand(1),
DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
}
// If we're inserting into a zero vector, our input was extracted from an
// insert into a zero vector of the same type, and the extraction was at
// least as large as the original insertion, just insert the original
// subvector into a zero vector.
if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
SubVec.getConstantOperandVal(1) == 0 &&
SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
SDValue Ins = SubVec.getOperand(0);
if (Ins.getConstantOperandVal(2) == 0 &&
ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
getZeroVector(OpVT, Subtarget, DAG, dl),
Ins.getOperand(1), N->getOperand(2));
}
// If we're inserting a bitcast into zeros, rewrite the insert and move the
// bitcast to the other side. This helps with detecting zero extending
// during isel.
// TODO: Is this useful for other indices than 0?
if (!IsI1Vector && SubVec.getOpcode() == ISD::BITCAST && IdxVal == 0) {
MVT CastVT = SubVec.getOperand(0).getSimpleValueType();
unsigned NumElems = OpVT.getSizeInBits() / CastVT.getScalarSizeInBits();
MVT NewVT = MVT::getVectorVT(CastVT.getVectorElementType(), NumElems);
SDValue Insert = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT,
DAG.getBitcast(NewVT, Vec),
SubVec.getOperand(0), N->getOperand(2));
return DAG.getBitcast(OpVT, Insert);
}
}
// Stop here if this is an i1 vector.
if (IsI1Vector)
return SDValue();
// If this is an insert of an extract, combine to a shuffle. Don't do this
// if the insert or extract can be represented with a subregister operation.
if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
SubVec.getOperand(0).getSimpleValueType() == OpVT &&
(IdxVal != 0 || !Vec.isUndef())) {
int ExtIdxVal = SubVec.getConstantOperandVal(1);
if (ExtIdxVal != 0) {
int VecNumElts = OpVT.getVectorNumElements();
int SubVecNumElts = SubVecVT.getVectorNumElements();
SmallVector<int, 64> Mask(VecNumElts);
// First create an identity shuffle mask.
for (int i = 0; i != VecNumElts; ++i)
Mask[i] = i;
// Now insert the extracted portion.
for (int i = 0; i != SubVecNumElts; ++i)
Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
}
}
// Fold two 16-byte or 32-byte subvector loads into one 32-byte or 64-byte
// load:
// (insert_subvector (insert_subvector undef, (load16 addr), 0),
// (load16 addr + 16), Elts/2)
// --> load32 addr
// or:
// (insert_subvector (insert_subvector undef, (load32 addr), 0),
// (load32 addr + 32), Elts/2)
// --> load64 addr
// or a 16-byte or 32-byte broadcast:
// (insert_subvector (insert_subvector undef, (load16 addr), 0),
// (load16 addr), Elts/2)
// --> X86SubVBroadcast(load16 addr)
// or:
// (insert_subvector (insert_subvector undef, (load32 addr), 0),
// (load32 addr), Elts/2)
// --> X86SubVBroadcast(load32 addr)
if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
OpVT.getSizeInBits() == SubVecVT.getSizeInBits() * 2) {
if (isNullConstant(Vec.getOperand(2))) {
SDValue SubVec2 = Vec.getOperand(1);
// If needed, look through bitcasts to get to the load.
if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(SubVec2))) {
bool Fast;
unsigned Alignment = FirstLd->getAlignment();
unsigned AS = FirstLd->getAddressSpace();
const X86TargetLowering *TLI = Subtarget.getTargetLowering();
if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
OpVT, AS, Alignment, &Fast) && Fast) {
SDValue Ops[] = {SubVec2, SubVec};
if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG,
Subtarget, false))
return Ld;
}
}
// If lower/upper loads are the same and the only users of the load, then
// lower to a VBROADCASTF128/VBROADCASTI128/etc.
if (auto *Ld = dyn_cast<LoadSDNode>(peekThroughOneUseBitcasts(SubVec2)))
if (SubVec2 == SubVec && ISD::isNormalLoad(Ld) &&
SDNode::areOnlyUsersOf({N, Vec.getNode()}, SubVec2.getNode()))
return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, SubVec);
// If this is subv_broadcast insert into both halves, use a larger
// subv_broadcast.
if (SubVec.getOpcode() == X86ISD::SUBV_BROADCAST && SubVec == SubVec2)
return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT,
SubVec.getOperand(0));
// If we're inserting all zeros into the upper half, change this to
// an insert into an all zeros vector. We will match this to a move
// with implicit upper bit zeroing during isel.
if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
getZeroVector(OpVT, Subtarget, DAG, dl), SubVec2,
Vec.getOperand(2));
// If we are inserting into both halves of the vector, the starting
// vector should be undef. If it isn't, make it so. Only do this if the
// early insert has no other uses.
// TODO: Should this be a generic DAG combine?
if (!Vec.getOperand(0).isUndef() && Vec.hasOneUse()) {
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, DAG.getUNDEF(OpVT),
SubVec2, Vec.getOperand(2));
return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec, SubVec,
N->getOperand(2));
}
}
}
return SDValue();
}
static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
if (DCI.isBeforeLegalizeOps())
return SDValue();
MVT OpVT = N->getSimpleValueType(0);
SDValue InVec = N->getOperand(0);
unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
if (ISD::isBuildVectorAllZeros(InVec.getNode()))
return getZeroVector(OpVT, Subtarget, DAG, SDLoc(N));
if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
if (OpVT.getScalarType() == MVT::i1)
return DAG.getConstant(1, SDLoc(N), OpVT);
return getOnesVector(OpVT, DAG, SDLoc(N));
}
if (InVec.getOpcode() == ISD::BUILD_VECTOR)
return DAG.getBuildVector(
OpVT, SDLoc(N),
InVec.getNode()->ops().slice(IdxVal, OpVT.getVectorNumElements()));
// If we're extracting the lowest subvector and we're the only user,
// we may be able to perform this with a smaller vector width.
if (IdxVal == 0 && InVec.hasOneUse()) {
unsigned InOpcode = InVec.getOpcode();
if (OpVT == MVT::v2f64 && InVec.getValueType() == MVT::v4f64) {
// v2f64 CVTDQ2PD(v4i32).
if (InOpcode == ISD::SINT_TO_FP &&
InVec.getOperand(0).getValueType() == MVT::v4i32) {
return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), OpVT, InVec.getOperand(0));
}
// v2f64 CVTPS2PD(v4f32).
if (InOpcode == ISD::FP_EXTEND &&
InVec.getOperand(0).getValueType() == MVT::v4f32) {
return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), OpVT, InVec.getOperand(0));
}
}
if ((InOpcode == X86ISD::VZEXT || InOpcode == X86ISD::VSEXT) &&
OpVT.is128BitVector() &&
InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
unsigned ExtOp = InOpcode == X86ISD::VZEXT ? ISD::ZERO_EXTEND_VECTOR_INREG
: ISD::SIGN_EXTEND_VECTOR_INREG;
return DAG.getNode(ExtOp, SDLoc(N), OpVT, InVec.getOperand(0));
}
}
return SDValue();
}
static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0);
SDValue Src = N->getOperand(0);
// If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
// This occurs frequently in our masked scalar intrinsic code and our
// floating point select lowering with AVX512.
// TODO: SimplifyDemandedBits instead?
if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
if (C->getAPIntValue().isOneValue())
return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), MVT::v1i1,
Src.getOperand(0));
return SDValue();
}
// Simplify PMULDQ and PMULUDQ operations.
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI) {
SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
!DCI.isBeforeLegalizeOps());
APInt DemandedMask(APInt::getLowBitsSet(64, 32));
// PMULDQ/PMULUDQ only uses the lower 32 bits from each vector element.
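// e.g. for v2i64 inputs, only bits [31:0] of each 64-bit lane are read, so
// the upper 32 bits of each lane may be simplified freely.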
KnownBits LHSKnown;
if (TLI.SimplifyDemandedBits(LHS, DemandedMask, LHSKnown, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
return SDValue(N, 0);
}
KnownBits RHSKnown;
if (TLI.SimplifyDemandedBits(RHS, DemandedMask, RHSKnown, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
return SDValue(N, 0);
}
return SDValue();
}
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
case ISD::SCALAR_TO_VECTOR:
return combineScalarToVector(N, DAG);
case ISD::EXTRACT_VECTOR_ELT:
case X86ISD::PEXTRW:
case X86ISD::PEXTRB:
return combineExtractVectorElt(N, DAG, DCI, Subtarget);
case ISD::INSERT_SUBVECTOR:
return combineInsertSubvector(N, DAG, DCI, Subtarget);
case ISD::EXTRACT_SUBVECTOR:
return combineExtractSubvector(N, DAG, DCI, Subtarget);
case ISD::VSELECT:
case ISD::SELECT:
case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
case ISD::ADD: return combineAdd(N, DAG, Subtarget);
case ISD::SUB: return combineSub(N, DAG, Subtarget);
case X86ISD::SBB: return combineSBB(N, DAG);
case X86ISD::ADC: return combineADC(N, DAG, DCI);
case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
case ISD::SHL:
case ISD::SRA:
case ISD::SRL: return combineShift(N, DAG, DCI, Subtarget);
case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
case ISD::STORE: return combineStore(N, DAG, Subtarget);
case ISD::MSTORE: return combineMaskedStore(N, DAG, Subtarget);
case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget);
case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
case ISD::FADD:
case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
case X86ISD::FXOR:
case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
case X86ISD::FMIN:
case X86ISD::FMAX: return combineFMinFMax(N, DAG);
case ISD::FMINNUM:
case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
case X86ISD::BT: return combineBT(N, DAG, DCI);
case ISD::ANY_EXTEND:
case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
case X86ISD::PACKSS:
case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
case X86ISD::VSHLI:
case X86ISD::VSRAI:
case X86ISD::VSRLI:
return combineVectorShiftImm(N, DAG, DCI, Subtarget);
case ISD::SIGN_EXTEND_VECTOR_INREG:
case ISD::ZERO_EXTEND_VECTOR_INREG:
case X86ISD::VSEXT:
case X86ISD::VZEXT: return combineVSZext(N, DAG, DCI, Subtarget);
case X86ISD::PINSRB:
case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
case X86ISD::SHUFP: // Handle all target specific shuffles
case X86ISD::INSERTPS:
case X86ISD::EXTRQI:
case X86ISD::INSERTQI:
case X86ISD::PALIGNR:
case X86ISD::VSHLDQ:
case X86ISD::VSRLDQ:
case X86ISD::BLENDI:
case X86ISD::UNPCKH:
case X86ISD::UNPCKL:
case X86ISD::MOVHLPS:
case X86ISD::MOVLHPS:
case X86ISD::PSHUFB:
case X86ISD::PSHUFD:
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
case X86ISD::MOVSHDUP:
case X86ISD::MOVSLDUP:
case X86ISD::MOVDDUP:
case X86ISD::MOVSS:
case X86ISD::MOVSD:
case X86ISD::VBROADCAST:
case X86ISD::VPPERM:
case X86ISD::VPERMI:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
case X86ISD::VPERMIL2:
case X86ISD::VPERMILPI:
case X86ISD::VPERMILPV:
case X86ISD::VPERM2X128:
case X86ISD::SHUF128:
case X86ISD::VZEXT_MOVL:
case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
case X86ISD::FMADD_RND:
case X86ISD::FMSUB:
case X86ISD::FMSUB_RND:
case X86ISD::FNMADD:
case X86ISD::FNMADD_RND:
case X86ISD::FNMSUB:
case X86ISD::FNMSUB_RND:
case ISD::FMA: return combineFMA(N, DAG, Subtarget);
case X86ISD::FMADDSUB_RND:
case X86ISD::FMSUBADD_RND:
case X86ISD::FMADDSUB:
case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, Subtarget);
case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI);
case X86ISD::MGATHER:
case X86ISD::MSCATTER:
case ISD::MGATHER:
case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
case X86ISD::PCMPEQ:
case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
case X86ISD::PMULDQ:
case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI);
}
return SDValue();
}
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. E.g., on x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer and
/// some i16 instructions are slow.
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
if (!isTypeLegal(VT))
return false;
// There are no vXi8 shifts.
if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
return false;
if (VT != MVT::i16)
return true;
switch (Opc) {
default:
return true;
case ISD::LOAD:
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
case ISD::SHL:
case ISD::SRL:
case ISD::SUB:
case ISD::ADD:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
return false;
}
}
SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
SDValue Value, SDValue Addr,
SelectionDAG &DAG) const {
const Module *M = DAG.getMachineFunction().getMMI().getModule();
Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
if (IsCFProtectionSupported) {
// In case control-flow branch protection is enabled, we need to add the
// notrack prefix to the indirect branch. To do that we create an NT_BRIND
// SDNode; during ISel, the pattern will convert it to a jmp with the
// NoTrack prefix.
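// e.g. an indirect "jmp *%rax" is then emitted as "notrack jmp *%rax".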
return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
}
return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
}
/// This method queries the target whether it is beneficial for the DAG
/// combiner to
/// promote the specified node. If true, it should return the desired promotion
/// type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
EVT VT = Op.getValueType();
if (VT != MVT::i16)
return false;
auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
if (!Op.hasOneUse())
return false;
SDNode *User = *Op->use_begin();
if (!ISD::isNormalStore(User))
return false;
auto *Ld = cast<LoadSDNode>(Load);
auto *St = cast<StoreSDNode>(User);
return Ld->getBasePtr() == St->getBasePtr();
};
bool Commute = false;
switch (Op.getOpcode()) {
default: return false;
case ISD::SIGN_EXTEND:
case ISD::ZERO_EXTEND:
case ISD::ANY_EXTEND:
break;
case ISD::SHL:
case ISD::SRL: {
SDValue N0 = Op.getOperand(0);
// Look out for (store (shl (load), x)).
if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
return false;
break;
}
case ISD::ADD:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
Commute = true;
LLVM_FALLTHROUGH;
case ISD::SUB: {
SDValue N0 = Op.getOperand(0);
SDValue N1 = Op.getOperand(1);
// Avoid disabling potential load folding opportunities.
if (MayFoldLoad(N1) &&
(!Commute || !isa<ConstantSDNode>(N0) ||
(Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
return false;
if (MayFoldLoad(N0) &&
((Commute && !isa<ConstantSDNode>(N1)) ||
(Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
return false;
}
}
PVT = MVT::i32;
return true;
}
bool X86TargetLowering::
isDesirableToCombineBuildVectorToShuffleTruncate(
ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
"Element count mismatch");
assert(
Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
"Shuffle Mask expected to be legal");
// For 32-bit elements VPERMD is better than shuffle+truncate.
// TODO: After we improve lowerBuildVector, add exception for VPERMW.
if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
return false;
if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
return false;
return true;
}
//===----------------------------------------------------------------------===//
// X86 Inline Assembly Support
//===----------------------------------------------------------------------===//
// Helper to match a string separated by whitespace.
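// e.g. matchAsm("bswap $0", {"bswap", "$0"}) returns true, while
// matchAsm("bswapx $0", {"bswap", "$0"}) fails because "bswap" matches only
// a prefix of the first token.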
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
for (StringRef Piece : Pieces) {
if (!S.startswith(Piece)) // Check if the piece matches.
return false;
S = S.substr(Piece.size());
StringRef::size_type Pos = S.find_first_not_of(" \t");
if (Pos == 0) // We matched a prefix.
return false;
S = S.substr(Pos);
}
return S.empty();
}
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
if (AsmPieces.size() == 3)
return true;
else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
return true;
}
}
return false;
}
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
const std::string &AsmStr = IA->getAsmString();
IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
if (!Ty || Ty->getBitWidth() % 16 != 0)
return false;
// TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
SmallVector<StringRef, 4> AsmPieces;
SplitString(AsmStr, AsmPieces, ";\n");
switch (AsmPieces.size()) {
default: return false;
case 1:
// FIXME: this should verify that we are targeting a 486 or better. If not,
// we will turn this bswap into something that will be lowered to logical
// ops instead of emitting the bswap asm. For now, we don't support 486 or
// lower so don't worry about this.
// bswap $0
if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
// No need to check constraints, nothing other than the equivalent of
// "=r,0" would be valid here.
return IntrinsicLowering::LowerToByteSwap(CI);
}
// rorw $$8, ${0:w} --> llvm.bswap.i16
if (CI->getType()->isIntegerTy(16) &&
IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
(matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
AsmPieces.clear();
StringRef ConstraintsStr = IA->getConstraintString();
SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
array_pod_sort(AsmPieces.begin(), AsmPieces.end());
if (clobbersFlagRegisters(AsmPieces))
return IntrinsicLowering::LowerToByteSwap(CI);
}
break;
case 3:
if (CI->getType()->isIntegerTy(32) &&
IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
AsmPieces.clear();
StringRef ConstraintsStr = IA->getConstraintString();
SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
array_pod_sort(AsmPieces.begin(), AsmPieces.end());
if (clobbersFlagRegisters(AsmPieces))
return IntrinsicLowering::LowerToByteSwap(CI);
}
if (CI->getType()->isIntegerTy(64)) {
InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
if (Constraints.size() >= 2 &&
Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
// bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
return IntrinsicLowering::LowerToByteSwap(CI);
}
}
break;
}
return false;
}
/// Given a constraint letter, return the type of constraint for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
if (Constraint.size() == 1) {
switch (Constraint[0]) {
case 'R':
case 'q':
case 'Q':
case 'f':
case 't':
case 'u':
case 'y':
case 'x':
case 'v':
case 'Y':
case 'l':
case 'k': // AVX512 masking registers.
return C_RegisterClass;
case 'a':
case 'b':
case 'c':
case 'd':
case 'S':
case 'D':
case 'A':
return C_Register;
case 'I':
case 'J':
case 'K':
case 'L':
case 'M':
case 'N':
case 'G':
case 'C':
case 'e':
case 'Z':
return C_Other;
default:
break;
}
}
else if (Constraint.size() == 2) {
switch (Constraint[0]) {
default:
break;
case 'Y':
switch (Constraint[1]) {
default:
break;
case 'z':
case '0':
return C_Register;
case 'i':
case 'm':
case 'k':
case 't':
case '2':
return C_RegisterClass;
}
}
}
return TargetLowering::getConstraintType(Constraint);
}
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const {
ConstraintWeight weight = CW_Invalid;
Value *CallOperandVal = info.CallOperandVal;
// If we don't have a value, we can't do a match,
// but allow it at the lowest weight.
if (!CallOperandVal)
return CW_Default;
Type *type = CallOperandVal->getType();
// Look at the constraint type.
switch (*constraint) {
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
LLVM_FALLTHROUGH;
case 'R':
case 'q':
case 'Q':
case 'a':
case 'b':
case 'c':
case 'd':
case 'S':
case 'D':
case 'A':
if (CallOperandVal->getType()->isIntegerTy())
weight = CW_SpecificReg;
break;
case 'f':
case 't':
case 'u':
if (type->isFloatingPointTy())
weight = CW_SpecificReg;
break;
case 'y':
if (type->isX86_MMXTy() && Subtarget.hasMMX())
weight = CW_SpecificReg;
break;
case 'Y': {
unsigned Size = StringRef(constraint).size();
// When matching a bare 'Y', treat it as 'Yi', since 'Y' and 'Yi' are synonymous.

char NextChar = Size == 2 ? constraint[1] : 'i';
if (Size > 2)
break;
switch (NextChar) {
default:
return CW_Invalid;
// XMM0
case 'z':
case '0':
if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
return CW_SpecificReg;
return CW_Invalid;
// Conditional OpMask regs (AVX512)
case 'k':
if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
return CW_Register;
return CW_Invalid;
// Any MMX reg
case 'm':
if (type->isX86_MMXTy() && Subtarget.hasMMX())
return weight;
return CW_Invalid;
// Any SSE reg when ISA >= SSE2, same as 'Y'
case 'i':
case 't':
case '2':
if (!Subtarget.hasSSE2())
return CW_Invalid;
break;
}
// Fall through (handle "Y" constraint).
LLVM_FALLTHROUGH;
}
case 'v':
if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
weight = CW_Register;
LLVM_FALLTHROUGH;
case 'x':
if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
weight = CW_Register;
break;
case 'k':
// Enable conditional vector operations using %k<#> registers.
if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
weight = CW_Register;
break;
case 'I':
if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
if (C->getZExtValue() <= 31)
weight = CW_Constant;
}
break;
case 'J':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 63)
weight = CW_Constant;
}
break;
case 'K':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
weight = CW_Constant;
}
break;
case 'L':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
weight = CW_Constant;
}
break;
case 'M':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 3)
weight = CW_Constant;
}
break;
case 'N':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 0xff)
weight = CW_Constant;
}
break;
case 'G':
case 'C':
if (isa<ConstantFP>(CallOperandVal)) {
weight = CW_Constant;
}
break;
case 'e':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if ((C->getSExtValue() >= -0x80000000LL) &&
(C->getSExtValue() <= 0x7fffffffLL))
weight = CW_Constant;
}
break;
case 'Z':
if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
if (C->getZExtValue() <= 0xffffffff)
weight = CW_Constant;
}
break;
}
return weight;
}
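// Illustrative sketch: for a multi-alternative constraint such as "=rm", this
// weighting is what lets the generic code pick the better alternative; e.g.
// matching 'I' against a ConstantInt of 7 yields CW_Constant, while matching
// 'x' against a 128-bit vector type with SSE1 yields CW_Register.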
/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
if (ConstraintVT.isFloatingPoint()) {
if (Subtarget.hasSSE2())
return "Y";
if (Subtarget.hasSSE1())
return "x";
}
return TargetLowering::LowerXConstraint(ConstraintVT);
}
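// Example (sketch): on an SSE2 target, asm("" : "=X"(d)) with a double
// operand is effectively relowered as "=Y", so the operand lands in an XMM
// register; with only SSE1 it becomes "=x", and without SSE it falls back to
// the target-independent handling.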
/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue>&Ops,
SelectionDAG &DAG) const {
SDValue Result;
// Only support length 1 constraints for now.
if (Constraint.length() > 1) return;
char ConstraintLetter = Constraint[0];
switch (ConstraintLetter) {
default: break;
case 'I':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 31) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'J':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 63) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'K':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (isInt<8>(C->getSExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'L':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
(Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'M':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 3) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'N':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 255) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'O':
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (C->getZExtValue() <= 127) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
return;
case 'e': {
// 32-bit signed value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
C->getSExtValue())) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
break;
}
// FIXME gcc accepts some relocatable values here too, but only in certain
// memory models; it's complicated.
}
return;
}
case 'Z': {
// 32-bit unsigned value
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
C->getZExtValue())) {
Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
Op.getValueType());
break;
}
}
// FIXME gcc accepts some relocatable values here too, but only in certain
// memory models; it's complicated.
return;
}
case 'i': {
// Literal immediates are always ok.
if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
// Widen to 64 bits here to get it sign extended.
Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64);
break;
}
// In any sort of PIC mode addresses need to be computed at runtime by
// adding in a register or some sort of table lookup. These can't
// be used as immediates.
if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
return;
// If we are in non-pic codegen mode, we allow the address of a global (with
// an optional displacement) to be used with 'i'.
GlobalAddressSDNode *GA = nullptr;
int64_t Offset = 0;
// Match either (GA), (GA+C), (GA+C1+C2), etc.
while (1) {
if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
Offset += GA->getOffset();
break;
} else if (Op.getOpcode() == ISD::ADD) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
Offset += C->getZExtValue();
Op = Op.getOperand(0);
continue;
}
} else if (Op.getOpcode() == ISD::SUB) {
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
Offset += -C->getZExtValue();
Op = Op.getOperand(0);
continue;
}
}
// Otherwise, this isn't something we can handle, reject it.
return;
}
const GlobalValue *GV = GA->getGlobal();
// If we require an extra load to get this address, as in PIC mode, we
// can't accept it.
if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV)))
return;
Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
GA->getValueType(0), Offset);
break;
}
}
if (Result.getNode()) {
Ops.push_back(Result);
return;
}
return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
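// Illustrative sketch of the immediate validation above: for
//   asm volatile("outb %0, %1" : : "a"(val), "N"(0x71));
// the 'N' path accepts 0x71 because it is <= 255, while an out-of-range
// immediate adds nothing to Ops and the operand is rejected.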
/// Check if \p RC is a general purpose register class.
/// I.e., GR* or one of their variant.
static bool isGRClass(const TargetRegisterClass &RC) {
return RC.hasSuperClassEq(&X86::GR8RegClass) ||
RC.hasSuperClassEq(&X86::GR16RegClass) ||
RC.hasSuperClassEq(&X86::GR32RegClass) ||
RC.hasSuperClassEq(&X86::GR64RegClass) ||
RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
}
/// Check if \p RC is a vector register class.
/// I.e., FR* / VR* or one of their variant.
static bool isFRClass(const TargetRegisterClass &RC) {
return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
RC.hasSuperClassEq(&X86::FR64XRegClass) ||
RC.hasSuperClassEq(&X86::VR128XRegClass) ||
RC.hasSuperClassEq(&X86::VR256XRegClass) ||
RC.hasSuperClassEq(&X86::VR512RegClass);
}
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint,
MVT VT) const {
// First, see if this is a constraint that directly corresponds to an LLVM
// register class.
if (Constraint.size() == 1) {
// GCC Constraint Letters
switch (Constraint[0]) {
default: break;
// TODO: Slight differences here in allocation order and leaving
// RIP in the class. Do they matter any more here than they do
// in the normal allocation?
case 'k':
if (Subtarget.hasAVX512()) {
// Only supported in AVX512 or later.
switch (VT.SimpleTy) {
default: break;
case MVT::i32:
return std::make_pair(0U, &X86::VK32RegClass);
case MVT::i16:
return std::make_pair(0U, &X86::VK16RegClass);
case MVT::i8:
return std::make_pair(0U, &X86::VK8RegClass);
case MVT::i1:
return std::make_pair(0U, &X86::VK1RegClass);
case MVT::i64:
return std::make_pair(0U, &X86::VK64RegClass);
}
}
break;
case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
if (Subtarget.is64Bit()) {
if (VT == MVT::i32 || VT == MVT::f32)
return std::make_pair(0U, &X86::GR32RegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16RegClass);
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8RegClass);
if (VT == MVT::i64 || VT == MVT::f64)
return std::make_pair(0U, &X86::GR64RegClass);
break;
}
LLVM_FALLTHROUGH;
// 32-bit fallthrough
case 'Q': // Q_REGS
if (VT == MVT::i32 || VT == MVT::f32)
return std::make_pair(0U, &X86::GR32_ABCDRegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16_ABCDRegClass);
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
if (VT == MVT::i64)
return std::make_pair(0U, &X86::GR64_ABCDRegClass);
break;
case 'r': // GENERAL_REGS
case 'l': // INDEX_REGS
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8RegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16RegClass);
if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32RegClass);
return std::make_pair(0U, &X86::GR64RegClass);
case 'R': // LEGACY_REGS
if (VT == MVT::i8 || VT == MVT::i1)
return std::make_pair(0U, &X86::GR8_NOREXRegClass);
if (VT == MVT::i16)
return std::make_pair(0U, &X86::GR16_NOREXRegClass);
if (VT == MVT::i32 || !Subtarget.is64Bit())
return std::make_pair(0U, &X86::GR32_NOREXRegClass);
return std::make_pair(0U, &X86::GR64_NOREXRegClass);
case 'f': // FP Stack registers.
// If SSE is enabled for this VT, use f80 to ensure the isel moves the
// value to the correct fpstack register class.
if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
return std::make_pair(0U, &X86::RFP32RegClass);
if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
return std::make_pair(0U, &X86::RFP64RegClass);
return std::make_pair(0U, &X86::RFP80RegClass);
case 'y': // MMX_REGS if MMX allowed.
if (!Subtarget.hasMMX()) break;
return std::make_pair(0U, &X86::VR64RegClass);
case 'Y': // SSE_REGS if SSE2 allowed
if (!Subtarget.hasSSE2()) break;
LLVM_FALLTHROUGH;
case 'v':
case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
if (!Subtarget.hasSSE1()) break;
bool VConstraint = (Constraint[0] == 'v');
switch (VT.SimpleTy) {
default: break;
// Scalar SSE types.
case MVT::f32:
case MVT::i32:
if (VConstraint && Subtarget.hasAVX512() && Subtarget.hasVLX())
return std::make_pair(0U, &X86::FR32XRegClass);
return std::make_pair(0U, &X86::FR32RegClass);
case MVT::f64:
case MVT::i64:
if (VConstraint && Subtarget.hasVLX())
return std::make_pair(0U, &X86::FR64XRegClass);
return std::make_pair(0U, &X86::FR64RegClass);
// TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
// Vector types.
case MVT::v16i8:
case MVT::v8i16:
case MVT::v4i32:
case MVT::v2i64:
case MVT::v4f32:
case MVT::v2f64:
if (VConstraint && Subtarget.hasVLX())
return std::make_pair(0U, &X86::VR128XRegClass);
return std::make_pair(0U, &X86::VR128RegClass);
// AVX types.
case MVT::v32i8:
case MVT::v16i16:
case MVT::v8i32:
case MVT::v4i64:
case MVT::v8f32:
case MVT::v4f64:
if (VConstraint && Subtarget.hasVLX())
return std::make_pair(0U, &X86::VR256XRegClass);
return std::make_pair(0U, &X86::VR256RegClass);
case MVT::v8f64:
case MVT::v16f32:
case MVT::v16i32:
case MVT::v8i64:
return std::make_pair(0U, &X86::VR512RegClass);
}
break;
}
} else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
switch (Constraint[1]) {
default:
break;
case 'i':
case 't':
case '2':
return getRegForInlineAsmConstraint(TRI, "Y", VT);
case 'm':
if (!Subtarget.hasMMX()) break;
return std::make_pair(0U, &X86::VR64RegClass);
case 'z':
case '0':
if (!Subtarget.hasSSE1()) break;
return std::make_pair(X86::XMM0, &X86::VR128RegClass);
case 'k':
// This register class doesn't allocate k0 for masked vector operation.
if (Subtarget.hasAVX512()) { // Only supported in AVX512.
switch (VT.SimpleTy) {
default: break;
case MVT::i32:
return std::make_pair(0U, &X86::VK32WMRegClass);
case MVT::i16:
return std::make_pair(0U, &X86::VK16WMRegClass);
case MVT::i8:
return std::make_pair(0U, &X86::VK8WMRegClass);
case MVT::i1:
return std::make_pair(0U, &X86::VK1WMRegClass);
case MVT::i64:
return std::make_pair(0U, &X86::VK64WMRegClass);
}
}
break;
}
}
// Use the default implementation in TargetLowering to convert the register
// constraint into a member of a register class.
std::pair<unsigned, const TargetRegisterClass*> Res;
Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
// Not found as a standard register?
if (!Res.second) {
// Map "st(0)" .. "st(7)" to the corresponding FP registers.
if (Constraint.size() == 7 && Constraint[0] == '{' &&
tolower(Constraint[1]) == 's' &&
tolower(Constraint[2]) == 't' &&
Constraint[3] == '(' &&
(Constraint[4] >= '0' && Constraint[4] <= '7') &&
Constraint[5] == ')' &&
Constraint[6] == '}') {
Res.first = X86::FP0+Constraint[4]-'0';
Res.second = &X86::RFP80RegClass;
return Res;
}
// GCC allows "st(0)" to be called just plain "st".
if (StringRef("{st}").equals_lower(Constraint)) {
Res.first = X86::FP0;
Res.second = &X86::RFP80RegClass;
return Res;
}
// flags -> EFLAGS
if (StringRef("{flags}").equals_lower(Constraint)) {
Res.first = X86::EFLAGS;
Res.second = &X86::CCRRegClass;
return Res;
}
// 'A' means [ER]AX + [ER]DX.
if (Constraint == "A") {
if (Subtarget.is64Bit()) {
Res.first = X86::RAX;
Res.second = &X86::GR64_ADRegClass;
} else {
assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
"Expecting 64, 32 or 16 bit subtarget");
Res.first = X86::EAX;
Res.second = &X86::GR32_ADRegClass;
}
return Res;
}
return Res;
}
// Make sure it isn't a register that requires 64-bit mode.
if (!Subtarget.is64Bit() &&
(isFRClass(*Res.second) || isGRClass(*Res.second)) &&
TRI->getEncodingValue(Res.first) >= 8) {
// Register requires REX prefix, but we're in 32-bit mode.
Res.first = 0;
Res.second = nullptr;
return Res;
}
// Make sure it isn't a register that requires AVX512.
if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
TRI->getEncodingValue(Res.first) & 0x10) {
// Register requires EVEX prefix.
Res.first = 0;
Res.second = nullptr;
return Res;
}
// Otherwise, check to see if this is a register class of the wrong value
// type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
// turn into {ax},{dx}.
// MVT::Other is used to specify clobber names.
if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
return Res; // Correct type already, nothing to do.
// Get a matching integer of the correct size, i.e. "ax" with MVT::i32 should
// return "eax". This should even work for things like getting 64-bit integer
// registers when given an f64 type.
const TargetRegisterClass *Class = Res.second;
// The generic code will match the first register class that contains the
// given register. Thus, based on the ordering of the tablegened file,
// the "plain" GR classes might not come first.
// Therefore, use a helper method.
if (isGRClass(*Class)) {
unsigned Size = VT.getSizeInBits();
if (Size == 1) Size = 8;
unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
if (DestReg > 0) {
bool is64Bit = Subtarget.is64Bit();
const TargetRegisterClass *RC =
Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
: Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
: Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
: &X86::GR64RegClass;
if (RC->contains(DestReg))
Res = std::make_pair(DestReg, RC);
} else {
// No register found/type mismatch.
Res.first = 0;
Res.second = nullptr;
}
} else if (isFRClass(*Class)) {
// Handle references to XMM physical registers that got mapped into the
// wrong class. This can happen with constraints like {xmm0} where the
// target independent register mapper will just pick the first match it can
// find, ignoring the required type.
// TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
if (VT == MVT::f32 || VT == MVT::i32)
Res.second = &X86::FR32RegClass;
else if (VT == MVT::f64 || VT == MVT::i64)
Res.second = &X86::FR64RegClass;
else if (TRI->isTypeLegalForClass(X86::VR128RegClass, VT))
Res.second = &X86::VR128RegClass;
else if (TRI->isTypeLegalForClass(X86::VR256RegClass, VT))
Res.second = &X86::VR256RegClass;
else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
Res.second = &X86::VR512RegClass;
else {
// Type mismatch and not a clobber: return an error.
Res.first = 0;
Res.second = nullptr;
}
}
return Res;
}
int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
const AddrMode &AM, Type *Ty,
unsigned AS) const {
// Scaling factors are not free at all.
// An indexed folded instruction, i.e., inst (reg1, reg2, scale),
// will take 2 allocations in the out of order engine instead of 1
// for plain addressing mode, i.e. inst (reg1).
// E.g.,
// vaddps (%rsi,%rdx), %ymm0, %ymm1
// Requires two allocations (one for the load, one for the computation)
// whereas:
// vaddps (%rsi), %ymm0, %ymm1
// Requires just 1 allocation, i.e., freeing allocations for other operations
// and having less micro operations to execute.
//
// For some X86 architectures, this is even worse because for instance for
// stores, the complex addressing mode forces the instruction to use the
// "load" ports instead of the dedicated "store" port.
// E.g., on Haswell:
// vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
// vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
if (isLegalAddressingMode(DL, AM, Ty, AS))
// Scale represents reg2 * scale, thus account for 1
// as soon as we use a second register.
return AM.Scale != 0;
return -1;
}
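// Usage sketch: for a legal mode the return value is simply whether a scaled
// index register is present, so (%rsi) costs 0, (%rsi,%rdx,4) costs 1, and an
// illegal addressing mode reports -1.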
bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// Integer division on x86 is expensive. However, when aggressively optimizing
// for code size, we prefer to use a div instruction, as it is usually smaller
// than the alternative sequence.
// The exception to this is vector division. Since x86 doesn't have vector
// integer division, leaving the division as-is is a loss even in terms of
// size, because it will have to be scalarized, while the alternative code
// sequence can be performed in vector form.
bool OptSize =
Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
return OptSize && !VT.isVector();
}
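// Illustrative consequence: in a function carrying the minsize attribute, a
// scalar 'sdiv i32 %a, 10' is left as a div instruction, while a vector
// 'sdiv <4 x i32>' is still expanded, since scalarized division would lose
// on size as described above.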
void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
if (!Subtarget.is64Bit())
return;
// Update IsSplitCSR in X86MachineFunctionInfo.
X86MachineFunctionInfo *AFI =
Entry->getParent()->getInfo<X86MachineFunctionInfo>();
AFI->setIsSplitCSR(true);
}
void X86TargetLowering::insertCopiesSplitCSR(
MachineBasicBlock *Entry,
const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
if (!IStart)
return;
const TargetInstrInfo *TII = Subtarget.getInstrInfo();
MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
MachineBasicBlock::iterator MBBI = Entry->begin();
for (const MCPhysReg *I = IStart; *I; ++I) {
const TargetRegisterClass *RC = nullptr;
if (X86::GR64RegClass.contains(*I))
RC = &X86::GR64RegClass;
else
llvm_unreachable("Unexpected register class in CSRsViaCopy!");
unsigned NewVR = MRI->createVirtualRegister(RC);
// Create copy from CSR to a virtual register.
// FIXME: this currently does not emit CFI pseudo-instructions, it works
// fine for CXX_FAST_TLS since the C++-style TLS access functions should be
// nounwind. If we want to generalize this later, we may need to emit
// CFI pseudo-instructions.
assert(Entry->getParent()->getFunction().hasFnAttribute(
Attribute::NoUnwind) &&
"Function should be nounwind in insertCopiesSplitCSR!");
Entry->addLiveIn(*I);
BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
.addReg(*I);
// Insert the copy-back instructions right before the terminator.
for (auto *Exit : Exits)
BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
TII->get(TargetOpcode::COPY), *I)
.addReg(NewVR);
}
}
bool X86TargetLowering::supportSwiftError() const {
return Subtarget.is64Bit();
}
/// Returns the name of the symbol used to emit stack probes or the empty
/// string if not applicable.
StringRef X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
// If the function specifically requests stack probes, emit them.
if (MF.getFunction().hasFnAttribute("probe-stack"))
return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
// Generally, if we aren't on Windows, the platform ABI does not include
// support for stack probes, so don't emit them.
if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
return "";
// We need a stack probe to conform to the Windows ABI. Choose the right
// symbol.
if (Subtarget.is64Bit())
return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
}
Index: projects/clang700-import/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/Target/X86/X86InstrInfo.cpp (revision 340125)
@@ -1,7738 +1,7738 @@
//===-- X86InstrInfo.cpp - X86 Instruction Information --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "X86InstrInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrFoldTables.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
#define DEBUG_TYPE "x86-instr-info"
#define GET_INSTRINFO_CTOR_DTOR
#include "X86GenInstrInfo.inc"
static cl::opt<bool>
NoFusing("disable-spill-fusing",
cl::desc("Disable fusing of spill code into instructions"),
cl::Hidden);
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
cl::desc("Print instructions that the allocator wants to"
" fuse, but the X86 backend currently can't"),
cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
cl::desc("Re-materialize load from stub in PIC mode"),
cl::init(false), cl::Hidden);
static cl::opt<unsigned>
PartialRegUpdateClearance("partial-reg-update-clearance",
cl::desc("Clearance between two register writes "
"for inserting XOR to avoid partial "
"register update"),
cl::init(64), cl::Hidden);
static cl::opt<unsigned>
UndefRegClearance("undef-reg-clearance",
cl::desc("How many idle instructions we would like before "
"certain undef register reads"),
cl::init(128), cl::Hidden);
// Pin the vtable to this file.
void X86InstrInfo::anchor() {}
X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
: X86GenInstrInfo((STI.isTarget64BitLP64() ? X86::ADJCALLSTACKDOWN64
: X86::ADJCALLSTACKDOWN32),
(STI.isTarget64BitLP64() ? X86::ADJCALLSTACKUP64
: X86::ADJCALLSTACKUP32),
X86::CATCHRET,
(STI.is64Bit() ? X86::RETQ : X86::RETL)),
Subtarget(STI), RI(STI.getTargetTriple()) {
}
bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const {
switch (MI.getOpcode()) {
default: break;
case X86::MOVSX16rr8:
case X86::MOVZX16rr8:
case X86::MOVSX32rr8:
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
if (!Subtarget.is64Bit())
// It's not always legal to reference the low 8-bit subregister of the
// larger register in 32-bit mode.
return false;
LLVM_FALLTHROUGH;
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
case X86::MOVSX64rr32: {
if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
// Be conservative.
return false;
SrcReg = MI.getOperand(1).getReg();
DstReg = MI.getOperand(0).getReg();
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::MOVSX16rr8:
case X86::MOVZX16rr8:
case X86::MOVSX32rr8:
case X86::MOVZX32rr8:
case X86::MOVSX64rr8:
SubIdx = X86::sub_8bit;
break;
case X86::MOVSX32rr16:
case X86::MOVZX32rr16:
case X86::MOVSX64rr16:
SubIdx = X86::sub_16bit;
break;
case X86::MOVSX64rr32:
SubIdx = X86::sub_32bit;
break;
}
return true;
}
}
return false;
}
int X86InstrInfo::getSPAdjust(const MachineInstr &MI) const {
const MachineFunction *MF = MI.getParent()->getParent();
const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
if (isFrameInstr(MI)) {
unsigned StackAlign = TFI->getStackAlignment();
int SPAdj = alignTo(getFrameSize(MI), StackAlign);
SPAdj -= getFrameAdjustment(MI);
if (!isFrameSetup(MI))
SPAdj = -SPAdj;
return SPAdj;
}
// To know whether a call adjusts the stack, we need information
// that is bound to the following ADJCALLSTACKUP pseudo.
// Look for the next ADJCALLSTACKUP that follows the call.
if (MI.isCall()) {
const MachineBasicBlock *MBB = MI.getParent();
auto I = ++MachineBasicBlock::const_iterator(MI);
for (auto E = MBB->end(); I != E; ++I) {
if (I->getOpcode() == getCallFrameDestroyOpcode() ||
I->isCall())
break;
}
// If we could not find a frame destroy opcode, then it has already
// been simplified, so we don't care.
if (I->getOpcode() != getCallFrameDestroyOpcode())
return 0;
return -(I->getOperand(1).getImm());
}
// Currently handle only PUSHes we can reasonably expect to see
// in call sequences.
switch (MI.getOpcode()) {
default:
return 0;
case X86::PUSH32i8:
case X86::PUSH32r:
case X86::PUSH32rmm:
case X86::PUSH32rmr:
case X86::PUSHi32:
return 4;
case X86::PUSH64i8:
case X86::PUSH64r:
case X86::PUSH64rmm:
case X86::PUSH64rmr:
case X86::PUSH64i32:
return 8;
}
}
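// Example (sketch): within a call sequence, a PUSH32r reports an adjustment
// of 4 and a PUSH64r reports 8, while a CALL is resolved by scanning forward
// to its ADJCALLSTACKUP pseudo and returning the adjustment recorded there,
// negated.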
/// Return true and the FrameIndex if the specified operand and the
/// following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr &MI, unsigned int Op,
int &FrameIndex) const {
if (MI.getOperand(Op + X86::AddrBaseReg).isFI() &&
MI.getOperand(Op + X86::AddrScaleAmt).isImm() &&
MI.getOperand(Op + X86::AddrIndexReg).isReg() &&
MI.getOperand(Op + X86::AddrDisp).isImm() &&
MI.getOperand(Op + X86::AddrScaleAmt).getImm() == 1 &&
MI.getOperand(Op + X86::AddrIndexReg).getReg() == 0 &&
MI.getOperand(Op + X86::AddrDisp).getImm() == 0) {
FrameIndex = MI.getOperand(Op + X86::AddrBaseReg).getIndex();
return true;
}
return false;
}
static bool isFrameLoadOpcode(int Opcode, unsigned &MemBytes) {
switch (Opcode) {
default:
return false;
case X86::MOV8rm:
case X86::KMOVBkm:
MemBytes = 1;
return true;
case X86::MOV16rm:
case X86::KMOVWkm:
MemBytes = 2;
return true;
case X86::MOV32rm:
case X86::MOVSSrm:
case X86::VMOVSSZrm:
case X86::VMOVSSrm:
case X86::KMOVDkm:
MemBytes = 4;
return true;
case X86::MOV64rm:
case X86::LD_Fp64m:
case X86::MOVSDrm:
case X86::VMOVSDrm:
case X86::VMOVSDZrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
case X86::KMOVQkm:
MemBytes = 8;
return true;
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSZ128rm:
case X86::VMOVUPSZ128rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVAPDZ128rm:
case X86::VMOVUPDZ128rm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQU64Z128rm:
MemBytes = 16;
return true;
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVAPDZ256rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQU64Z256rm:
MemBytes = 32;
return true;
case X86::VMOVAPSZrm:
case X86::VMOVUPSZrm:
case X86::VMOVAPDZrm:
case X86::VMOVUPDZrm:
case X86::VMOVDQU8Zrm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU64Zrm:
MemBytes = 64;
return true;
}
}
static bool isFrameStoreOpcode(int Opcode, unsigned &MemBytes) {
switch (Opcode) {
default:
return false;
case X86::MOV8mr:
case X86::KMOVBmk:
MemBytes = 1;
return true;
case X86::MOV16mr:
case X86::KMOVWmk:
MemBytes = 2;
return true;
case X86::MOV32mr:
case X86::MOVSSmr:
case X86::VMOVSSmr:
case X86::VMOVSSZmr:
case X86::KMOVDmk:
MemBytes = 4;
return true;
case X86::MOV64mr:
case X86::ST_FpP64m:
case X86::MOVSDmr:
case X86::VMOVSDmr:
case X86::VMOVSDZmr:
case X86::MMX_MOVD64mr:
case X86::MMX_MOVQ64mr:
case X86::MMX_MOVNTQmr:
case X86::KMOVQmk:
MemBytes = 8;
return true;
case X86::MOVAPSmr:
case X86::MOVUPSmr:
case X86::MOVAPDmr:
case X86::MOVUPDmr:
case X86::MOVDQAmr:
case X86::MOVDQUmr:
case X86::VMOVAPSmr:
case X86::VMOVUPSmr:
case X86::VMOVAPDmr:
case X86::VMOVUPDmr:
case X86::VMOVDQAmr:
case X86::VMOVDQUmr:
case X86::VMOVUPSZ128mr:
case X86::VMOVAPSZ128mr:
case X86::VMOVUPSZ128mr_NOVLX:
case X86::VMOVAPSZ128mr_NOVLX:
case X86::VMOVUPDZ128mr:
case X86::VMOVAPDZ128mr:
case X86::VMOVDQA32Z128mr:
case X86::VMOVDQU32Z128mr:
case X86::VMOVDQA64Z128mr:
case X86::VMOVDQU64Z128mr:
case X86::VMOVDQU8Z128mr:
case X86::VMOVDQU16Z128mr:
MemBytes = 16;
return true;
case X86::VMOVUPSYmr:
case X86::VMOVAPSYmr:
case X86::VMOVUPDYmr:
case X86::VMOVAPDYmr:
case X86::VMOVDQUYmr:
case X86::VMOVDQAYmr:
case X86::VMOVUPSZ256mr:
case X86::VMOVAPSZ256mr:
case X86::VMOVUPSZ256mr_NOVLX:
case X86::VMOVAPSZ256mr_NOVLX:
case X86::VMOVUPDZ256mr:
case X86::VMOVAPDZ256mr:
case X86::VMOVDQU8Z256mr:
case X86::VMOVDQU16Z256mr:
case X86::VMOVDQA32Z256mr:
case X86::VMOVDQU32Z256mr:
case X86::VMOVDQA64Z256mr:
case X86::VMOVDQU64Z256mr:
MemBytes = 32;
return true;
case X86::VMOVUPSZmr:
case X86::VMOVAPSZmr:
case X86::VMOVUPDZmr:
case X86::VMOVAPDZmr:
case X86::VMOVDQU8Zmr:
case X86::VMOVDQU16Zmr:
case X86::VMOVDQA32Zmr:
case X86::VMOVDQU32Zmr:
case X86::VMOVDQA64Zmr:
case X86::VMOVDQU64Zmr:
MemBytes = 64;
return true;
}
return false;
}
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
return X86InstrInfo::isLoadFromStackSlot(MI, FrameIndex, Dummy);
}
unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const {
if (isFrameLoadOpcode(MI.getOpcode(), MemBytes))
if (MI.getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
return MI.getOperand(0).getReg();
return 0;
}
unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
if (isFrameLoadOpcode(MI.getOpcode(), Dummy)) {
unsigned Reg;
if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
return Reg;
// Check for post-frame index elimination operations
const MachineMemOperand *Dummy;
return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}
return 0;
}
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
return X86InstrInfo::isStoreToStackSlot(MI, FrameIndex, Dummy);
}
unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex,
unsigned &MemBytes) const {
if (isFrameStoreOpcode(MI.getOpcode(), MemBytes))
if (MI.getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
isFrameOperand(MI, 0, FrameIndex))
return MI.getOperand(X86::AddrNumOperands).getReg();
return 0;
}
unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr &MI,
int &FrameIndex) const {
unsigned Dummy;
if (isFrameStoreOpcode(MI.getOpcode(), Dummy)) {
unsigned Reg;
if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
return Reg;
// Check for post-frame index elimination operations
const MachineMemOperand *Dummy;
return hasStoreToStackSlot(MI, Dummy, FrameIndex);
}
return 0;
}
/// Return true if the register is a PIC base, i.e. it is defined by X86::MOVPC32r.
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
// Don't waste compile time scanning use-def chains of physregs.
if (!TargetRegisterInfo::isVirtualRegister(BaseReg))
return false;
bool isPICBase = false;
for (MachineRegisterInfo::def_instr_iterator I = MRI.def_instr_begin(BaseReg),
E = MRI.def_instr_end(); I != E; ++I) {
MachineInstr *DefMI = &*I;
if (DefMI->getOpcode() != X86::MOVPC32r)
return false;
assert(!isPICBase && "More than one PIC base?");
isPICBase = true;
}
return isPICBase;
}
bool X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
AliasAnalysis *AA) const {
switch (MI.getOpcode()) {
default: break;
case X86::MOV8rm:
case X86::MOV8rm_NOREX:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOV64rm:
case X86::LD_Fp64m:
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
case X86::VMOVSSrm:
case X86::VMOVSDrm:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
// AVX-512
case X86::VMOVSSZrm:
case X86::VMOVSDZrm:
case X86::VMOVAPDZ128rm:
case X86::VMOVAPDZ256rm:
case X86::VMOVAPDZrm:
case X86::VMOVAPSZ128rm:
case X86::VMOVAPSZ256rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVAPSZrm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQU64Z128rm:
case X86::VMOVDQU64Z256rm:
case X86::VMOVDQU64Zrm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU8Zrm:
case X86::VMOVUPDZ128rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVUPDZrm:
case X86::VMOVUPSZ128rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVUPSZrm: {
// Loads from constant pools are trivially rematerializable.
if (MI.getOperand(1 + X86::AddrBaseReg).isReg() &&
MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
MI.isDereferenceableInvariantLoad(AA)) {
unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0 || BaseReg == X86::RIP)
return true;
// Allow re-materialization of PIC load.
if (!ReMatPICStubLoad && MI.getOperand(1 + X86::AddrDisp).isGlobal())
return false;
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
return false;
}
case X86::LEA32r:
case X86::LEA64r: {
if (MI.getOperand(1 + X86::AddrScaleAmt).isImm() &&
MI.getOperand(1 + X86::AddrIndexReg).isReg() &&
MI.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
!MI.getOperand(1 + X86::AddrDisp).isReg()) {
// lea fi#, lea GV, etc. are all rematerializable.
if (!MI.getOperand(1 + X86::AddrBaseReg).isReg())
return true;
unsigned BaseReg = MI.getOperand(1 + X86::AddrBaseReg).getReg();
if (BaseReg == 0)
return true;
// Allow re-materialization of lea PICBase + x.
const MachineFunction &MF = *MI.getParent()->getParent();
const MachineRegisterInfo &MRI = MF.getRegInfo();
return regIsPICBase(BaseReg, MRI);
}
return false;
}
}
// All other instructions marked M_REMATERIALIZABLE are always trivially
// rematerializable.
return true;
}
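// Illustrative case: a RIP-relative constant-pool load such as
//   movaps .LCPI0_0(%rip), %xmm0
// has BaseReg == RIP, no index register, and an invariant dereferenceable
// memory operand, so the checks above report it as trivially
// rematerializable.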
bool X86InstrInfo::isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I) const {
MachineBasicBlock::iterator E = MBB.end();
// For compile time consideration, if we are not able to determine the
// safety after visiting 4 instructions in each direction, we will assume
// it's not safe.
MachineBasicBlock::iterator Iter = I;
for (unsigned i = 0; Iter != E && i < 4; ++i) {
bool SeenDef = false;
for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
MachineOperand &MO = Iter->getOperand(j);
if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
SeenDef = true;
if (!MO.isReg())
continue;
if (MO.getReg() == X86::EFLAGS) {
if (MO.isUse())
return false;
SeenDef = true;
}
}
if (SeenDef)
// This instruction defines EFLAGS, no need to look any further.
return true;
++Iter;
// Skip over debug instructions.
while (Iter != E && Iter->isDebugInstr())
++Iter;
}
// It is safe to clobber EFLAGS at the end of a block if no successor has it
// live in.
if (Iter == E) {
for (MachineBasicBlock *S : MBB.successors())
if (S->isLiveIn(X86::EFLAGS))
return false;
return true;
}
MachineBasicBlock::iterator B = MBB.begin();
Iter = I;
for (unsigned i = 0; i < 4; ++i) {
// If we make it to the beginning of the block, it's safe to clobber
// EFLAGS iff EFLAGS is not live-in.
if (Iter == B)
return !MBB.isLiveIn(X86::EFLAGS);
--Iter;
// Skip over debug instructions.
while (Iter != B && Iter->isDebugInstr())
--Iter;
bool SawKill = false;
for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
MachineOperand &MO = Iter->getOperand(j);
// A register mask may clobber EFLAGS, but we should still look for a
// live EFLAGS def.
if (MO.isRegMask() && MO.clobbersPhysReg(X86::EFLAGS))
SawKill = true;
if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
if (MO.isDef()) return MO.isDead();
if (MO.isKill()) SawKill = true;
}
}
if (SawKill)
// This instruction kills EFLAGS and doesn't redefine it, so
// there's no need to look further.
return true;
}
// Conservative answer.
return false;
}
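// Example (sketch): inserting a flag-clobbering 'xor %eax, %eax' immediately
// before an 'addl' is safe, because the forward scan hits a def of EFLAGS
// with no intervening use; inserting it right before a 'jne' is rejected,
// because the first relevant instruction uses EFLAGS.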
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
unsigned DestReg, unsigned SubIdx,
const MachineInstr &Orig,
const TargetRegisterInfo &TRI) const {
bool ClobbersEFLAGS = false;
for (const MachineOperand &MO : Orig.operands()) {
if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
ClobbersEFLAGS = true;
break;
}
}
if (ClobbersEFLAGS && !isSafeToClobberEFLAGS(MBB, I)) {
// The instruction clobbers EFLAGS. Re-materialize as MOV32ri to avoid side
// effects.
int Value;
switch (Orig.getOpcode()) {
case X86::MOV32r0: Value = 0; break;
case X86::MOV32r1: Value = 1; break;
case X86::MOV32r_1: Value = -1; break;
default:
llvm_unreachable("Unexpected instruction!");
}
const DebugLoc &DL = Orig.getDebugLoc();
BuildMI(MBB, I, DL, get(X86::MOV32ri))
.add(Orig.getOperand(0))
.addImm(Value);
} else {
MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
MBB.insert(I, MI);
}
MachineInstr &NewMI = *std::prev(I);
NewMI.substituteRegister(Orig.getOperand(0).getReg(), DestReg, SubIdx, TRI);
}
/// True if MI has a condition code def, e.g. EFLAGS, that is not marked dead.
bool X86InstrInfo::hasLiveCondCodeDef(MachineInstr &MI) const {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (MO.isReg() && MO.isDef() &&
MO.getReg() == X86::EFLAGS && !MO.isDead()) {
return true;
}
}
return false;
}
/// Return the shift count of a machine operand, truncated to the number of
/// bits the hardware actually honors (six with REX.W, five without).
inline static unsigned getTruncatedShiftCount(MachineInstr &MI,
unsigned ShiftAmtOperandIdx) {
// The shift count is six bits with the REX.W prefix and five bits without.
unsigned ShiftCountMask = (MI.getDesc().TSFlags & X86II::REX_W) ? 63 : 31;
unsigned Imm = MI.getOperand(ShiftAmtOperandIdx).getImm();
return Imm & ShiftCountMask;
}
/// Check whether the given shift count can be represented by a LEA
/// instruction.
inline static bool isTruncatedShiftCountForLEA(unsigned ShAmt) {
// Left shift instructions can be transformed into load-effective-address
// instructions if we can encode them appropriately.
// A LEA instruction utilizes a SIB byte to encode its scale factor.
// The SIB.scale field is two bits wide which means that we can encode any
// shift amount less than 4.
return ShAmt < 4 && ShAmt > 0;
}
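// Concrete consequence: shift amounts 1, 2, and 3 map onto the SIB scales 2,
// 4, and 8, so 'shll $3, %reg' can become 'leal (,%reg,8), %dst', while a
// shift by 4 would need scale 16, which the 2-bit SIB.scale field cannot
// encode.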
bool X86InstrInfo::classifyLEAReg(MachineInstr &MI, const MachineOperand &Src,
unsigned Opc, bool AllowSP, unsigned &NewSrc,
bool &isKill, bool &isUndef,
MachineOperand &ImplicitOp,
LiveVariables *LV) const {
MachineFunction &MF = *MI.getParent()->getParent();
const TargetRegisterClass *RC;
if (AllowSP) {
RC = Opc != X86::LEA32r ? &X86::GR64RegClass : &X86::GR32RegClass;
} else {
RC = Opc != X86::LEA32r ?
&X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass;
}
unsigned SrcReg = Src.getReg();
// For both LEA64 and LEA32 the register already has essentially the right
// type (32-bit or 64-bit); we may just need to forbid SP.
if (Opc != X86::LEA64_32r) {
NewSrc = SrcReg;
isKill = Src.isKill();
isUndef = Src.isUndef();
if (TargetRegisterInfo::isVirtualRegister(NewSrc) &&
!MF.getRegInfo().constrainRegClass(NewSrc, RC))
return false;
return true;
}
// This is for an LEA64_32r and incoming registers are 32-bit. One way or
// another we need to add 64-bit registers to the final MI.
if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
ImplicitOp = Src;
ImplicitOp.setImplicit();
NewSrc = getX86SubSuperRegister(Src.getReg(), 64);
isKill = Src.isKill();
isUndef = Src.isUndef();
} else {
// Virtual register of the wrong class; we have to create a temporary 64-bit
// vreg to feed into the LEA.
NewSrc = MF.getRegInfo().createVirtualRegister(RC);
MachineInstr *Copy =
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(NewSrc, RegState::Define | RegState::Undef, X86::sub_32bit)
.add(Src);
// Which is obviously going to be dead after we're done with it.
isKill = true;
isUndef = false;
if (LV)
LV->replaceKillInstruction(SrcReg, MI, *Copy);
}
// We've set all the parameters without issue.
return true;
}
/// Helper for convertToThreeAddress when 16-bit LEA is disabled: uses 32-bit
/// LEA to form 3-address code by promoting to a 32-bit superregister and then
/// truncating back down to a 16-bit subregister.
MachineInstr *X86InstrInfo::convertToThreeAddressWithLEA(
unsigned MIOpc, MachineFunction::iterator &MFI, MachineInstr &MI,
LiveVariables *LV) const {
MachineBasicBlock::iterator MBBI = MI.getIterator();
unsigned Dest = MI.getOperand(0).getReg();
unsigned Src = MI.getOperand(1).getReg();
bool isDead = MI.getOperand(0).isDead();
bool isKill = MI.getOperand(1).isKill();
MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
unsigned Opc, leaInReg;
if (Subtarget.is64Bit()) {
Opc = X86::LEA64_32r;
leaInReg = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
} else {
Opc = X86::LEA32r;
leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
}
// Build and insert into an implicit UNDEF value. This is OK because
// we'll be shifting and then extracting the lower 16 bits.
// This has the potential to cause a partial register stall, e.g.:
// movw (%rbp,%rcx,2), %dx
// leal -65(%rdx), %esi
// But testing has shown this *does* help performance in 64-bit mode (at
// least on modern x86 machines).
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
MachineInstr *InsMI =
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(leaInReg, RegState::Define, X86::sub_16bit)
.addReg(Src, getKillRegState(isKill));
MachineInstrBuilder MIB =
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(Opc), leaOutReg);
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::SHL16ri: {
unsigned ShAmt = MI.getOperand(2).getImm();
MIB.addReg(0).addImm(1ULL << ShAmt)
.addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
break;
}
case X86::INC16r:
addRegOffset(MIB, leaInReg, true, 1);
break;
case X86::DEC16r:
addRegOffset(MIB, leaInReg, true, -1);
break;
case X86::ADD16ri:
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
addRegOffset(MIB, leaInReg, true, MI.getOperand(2).getImm());
break;
case X86::ADD16rr:
case X86::ADD16rr_DB: {
unsigned Src2 = MI.getOperand(2).getReg();
bool isKill2 = MI.getOperand(2).isKill();
unsigned leaInReg2 = 0;
MachineInstr *InsMI2 = nullptr;
if (Src == Src2) {
// ADD16rr killed %reg1028, %reg1028
// just a single insert_subreg.
addRegReg(MIB, leaInReg, true, leaInReg, false);
} else {
if (Subtarget.is64Bit())
leaInReg2 = RegInfo.createVirtualRegister(&X86::GR64_NOSPRegClass);
else
leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
// Build and insert into an implicit UNDEF value. This is OK because
// we'll be shifting and then extracting the lower 16 bits.
BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
InsMI2 = BuildMI(*MFI, &*MIB, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(leaInReg2, RegState::Define, X86::sub_16bit)
.addReg(Src2, getKillRegState(isKill2));
addRegReg(MIB, leaInReg, true, leaInReg2, true);
}
if (LV && isKill2 && InsMI2)
LV->replaceKillInstruction(Src2, MI, *InsMI2);
break;
}
}
MachineInstr *NewMI = MIB;
MachineInstr *ExtMI =
BuildMI(*MFI, MBBI, MI.getDebugLoc(), get(TargetOpcode::COPY))
.addReg(Dest, RegState::Define | getDeadRegState(isDead))
.addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
if (LV) {
// Update live variables
LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
if (isKill)
LV->replaceKillInstruction(Src, MI, *InsMI);
if (isDead)
LV->replaceKillInstruction(Dest, MI, *ExtMI);
}
return ExtMI;
}
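// Sketch of the promotion above for a 16-bit add: something like
//   addw $5, %ax
// becomes (in vregs) an IMPLICIT_DEF of a 32-bit register, a COPY of AX into
// its sub_16bit lane, a 'leal 5(%in32), %out32', and a COPY of the low 16
// bits back into the destination.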
/// This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
MachineInstr &MI, LiveVariables *LV) const {
// The following opcodes also set the condition code register(s). Only
// convert them to an equivalent LEA if the condition code register defs
// are dead!
if (hasLiveCondCodeDef(MI))
return nullptr;
MachineFunction &MF = *MI.getParent()->getParent();
// All input instructions are two-address instructions. Get the known operands.
const MachineOperand &Dest = MI.getOperand(0);
const MachineOperand &Src = MI.getOperand(1);
MachineInstr *NewMI = nullptr;
// FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
// we have better subtarget support, enable the 16-bit LEA generation here.
// 16-bit LEA is also slow on Core2.
bool DisableLEA16 = true;
bool is64Bit = Subtarget.is64Bit();
unsigned MIOpc = MI.getOpcode();
switch (MIOpc) {
default: return nullptr;
case X86::SHL64ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
// LEA can't handle RSP.
if (TargetRegisterInfo::isVirtualRegister(Src.getReg()) &&
!MF.getRegInfo().constrainRegClass(Src.getReg(),
&X86::GR64_NOSPRegClass))
return nullptr;
NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r))
.add(Dest)
.addReg(0)
.addImm(1ULL << ShAmt)
.add(Src)
.addImm(0)
.addReg(0);
break;
}
case X86::SHL32ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
// LEA can't handle ESP.
bool isKill, isUndef;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
SrcReg, isKill, isUndef, ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB =
BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(0)
.addImm(1ULL << ShAmt)
.addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
.addImm(0)
.addReg(0);
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = MIB;
break;
}
case X86::SHL16ri: {
assert(MI.getNumOperands() >= 3 && "Unknown shift instruction!");
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (!isTruncatedShiftCountForLEA(ShAmt)) return nullptr;
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
NewMI = BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r))
.add(Dest)
.addReg(0)
.addImm(1ULL << ShAmt)
.add(Src)
.addImm(0)
.addReg(0);
break;
}
case X86::INC64r:
case X86::INC32r: {
assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
bool isKill, isUndef;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
SrcReg, isKill, isUndef, ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB =
BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(SrcReg,
getKillRegState(isKill) | getUndefRegState(isUndef));
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = addOffset(MIB, 1);
break;
}
case X86::INC16r:
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
assert(MI.getNumOperands() >= 2 && "Unknown inc instruction!");
NewMI = addOffset(
BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), 1);
break;
case X86::DEC64r:
case X86::DEC32r: {
assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
: (is64Bit ? X86::LEA64_32r : X86::LEA32r);
bool isKill, isUndef;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ false,
SrcReg, isKill, isUndef, ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(SrcReg, getUndefRegState(isUndef) |
getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = addOffset(MIB, -1);
break;
}
case X86::DEC16r:
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
assert(MI.getNumOperands() >= 2 && "Unknown dec instruction!");
NewMI = addOffset(
BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src), -1);
break;
case X86::ADD64rr:
case X86::ADD64rr_DB:
case X86::ADD32rr:
case X86::ADD32rr_DB: {
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc;
if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB)
Opc = X86::LEA64r;
else
Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
bool isKill, isUndef;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
SrcReg, isKill, isUndef, ImplicitOp, LV))
return nullptr;
const MachineOperand &Src2 = MI.getOperand(2);
bool isKill2, isUndef2;
unsigned SrcReg2;
MachineOperand ImplicitOp2 = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src2, Opc, /*AllowSP=*/ false,
SrcReg2, isKill2, isUndef2, ImplicitOp2, LV))
return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc)).add(Dest);
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
if (ImplicitOp2.getReg() != 0)
MIB.add(ImplicitOp2);
NewMI = addRegReg(MIB, SrcReg, isKill, SrcReg2, isKill2);
// Preserve undefness of the operands.
NewMI->getOperand(1).setIsUndef(isUndef);
NewMI->getOperand(3).setIsUndef(isUndef2);
if (LV && Src2.isKill())
LV->replaceKillInstruction(SrcReg2, MI, *NewMI);
break;
}
case X86::ADD16rr:
case X86::ADD16rr_DB: {
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Src2 = MI.getOperand(2).getReg();
bool isKill2 = MI.getOperand(2).isKill();
NewMI = addRegReg(BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest),
Src.getReg(), Src.isKill(), Src2, isKill2);
// Preserve undefness of the operands.
bool isUndef = MI.getOperand(1).isUndef();
bool isUndef2 = MI.getOperand(2).isUndef();
NewMI->getOperand(1).setIsUndef(isUndef);
NewMI->getOperand(3).setIsUndef(isUndef2);
if (LV && isKill2)
LV->replaceKillInstruction(Src2, MI, *NewMI);
break;
}
case X86::ADD64ri32:
case X86::ADD64ri8:
case X86::ADD64ri32_DB:
case X86::ADD64ri8_DB:
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addOffset(
BuildMI(MF, MI.getDebugLoc(), get(X86::LEA64r)).add(Dest).add(Src),
MI.getOperand(2));
break;
case X86::ADD32ri:
case X86::ADD32ri8:
case X86::ADD32ri_DB:
case X86::ADD32ri8_DB: {
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
bool isKill, isUndef;
unsigned SrcReg;
MachineOperand ImplicitOp = MachineOperand::CreateReg(0, false);
if (!classifyLEAReg(MI, Src, Opc, /*AllowSP=*/ true,
SrcReg, isKill, isUndef, ImplicitOp, LV))
return nullptr;
MachineInstrBuilder MIB = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.addReg(SrcReg, getUndefRegState(isUndef) |
getKillRegState(isKill));
if (ImplicitOp.getReg() != 0)
MIB.add(ImplicitOp);
NewMI = addOffset(MIB, MI.getOperand(2));
break;
}
case X86::ADD16ri:
case X86::ADD16ri8:
case X86::ADD16ri_DB:
case X86::ADD16ri8_DB:
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MI, LV)
: nullptr;
assert(MI.getNumOperands() >= 3 && "Unknown add instruction!");
NewMI = addOffset(
BuildMI(MF, MI.getDebugLoc(), get(X86::LEA16r)).add(Dest).add(Src),
MI.getOperand(2));
break;
case X86::VMOVDQU8Z128rmk:
case X86::VMOVDQU8Z256rmk:
case X86::VMOVDQU8Zrmk:
case X86::VMOVDQU16Z128rmk:
case X86::VMOVDQU16Z256rmk:
case X86::VMOVDQU16Zrmk:
case X86::VMOVDQU32Z128rmk: case X86::VMOVDQA32Z128rmk:
case X86::VMOVDQU32Z256rmk: case X86::VMOVDQA32Z256rmk:
case X86::VMOVDQU32Zrmk: case X86::VMOVDQA32Zrmk:
case X86::VMOVDQU64Z128rmk: case X86::VMOVDQA64Z128rmk:
case X86::VMOVDQU64Z256rmk: case X86::VMOVDQA64Z256rmk:
case X86::VMOVDQU64Zrmk: case X86::VMOVDQA64Zrmk:
case X86::VMOVUPDZ128rmk: case X86::VMOVAPDZ128rmk:
case X86::VMOVUPDZ256rmk: case X86::VMOVAPDZ256rmk:
case X86::VMOVUPDZrmk: case X86::VMOVAPDZrmk:
case X86::VMOVUPSZ128rmk: case X86::VMOVAPSZ128rmk:
case X86::VMOVUPSZ256rmk: case X86::VMOVAPSZ256rmk:
case X86::VMOVUPSZrmk: case X86::VMOVAPSZrmk: {
unsigned Opc;
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::VMOVDQU8Z128rmk: Opc = X86::VPBLENDMBZ128rmk; break;
case X86::VMOVDQU8Z256rmk: Opc = X86::VPBLENDMBZ256rmk; break;
case X86::VMOVDQU8Zrmk: Opc = X86::VPBLENDMBZrmk; break;
case X86::VMOVDQU16Z128rmk: Opc = X86::VPBLENDMWZ128rmk; break;
case X86::VMOVDQU16Z256rmk: Opc = X86::VPBLENDMWZ256rmk; break;
case X86::VMOVDQU16Zrmk: Opc = X86::VPBLENDMWZrmk; break;
case X86::VMOVDQU32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
case X86::VMOVDQU32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
case X86::VMOVDQU32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
case X86::VMOVDQU64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
case X86::VMOVDQU64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
case X86::VMOVDQU64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
case X86::VMOVUPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
case X86::VMOVUPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
case X86::VMOVUPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
case X86::VMOVUPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
case X86::VMOVUPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
case X86::VMOVUPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
case X86::VMOVDQA32Z128rmk: Opc = X86::VPBLENDMDZ128rmk; break;
case X86::VMOVDQA32Z256rmk: Opc = X86::VPBLENDMDZ256rmk; break;
case X86::VMOVDQA32Zrmk: Opc = X86::VPBLENDMDZrmk; break;
case X86::VMOVDQA64Z128rmk: Opc = X86::VPBLENDMQZ128rmk; break;
case X86::VMOVDQA64Z256rmk: Opc = X86::VPBLENDMQZ256rmk; break;
case X86::VMOVDQA64Zrmk: Opc = X86::VPBLENDMQZrmk; break;
case X86::VMOVAPDZ128rmk: Opc = X86::VBLENDMPDZ128rmk; break;
case X86::VMOVAPDZ256rmk: Opc = X86::VBLENDMPDZ256rmk; break;
case X86::VMOVAPDZrmk: Opc = X86::VBLENDMPDZrmk; break;
case X86::VMOVAPSZ128rmk: Opc = X86::VBLENDMPSZ128rmk; break;
case X86::VMOVAPSZ256rmk: Opc = X86::VBLENDMPSZ256rmk; break;
case X86::VMOVAPSZrmk: Opc = X86::VBLENDMPSZrmk; break;
}
NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.add(MI.getOperand(2))
.add(Src)
.add(MI.getOperand(3))
.add(MI.getOperand(4))
.add(MI.getOperand(5))
.add(MI.getOperand(6))
.add(MI.getOperand(7));
break;
}
case X86::VMOVDQU8Z128rrk:
case X86::VMOVDQU8Z256rrk:
case X86::VMOVDQU8Zrrk:
case X86::VMOVDQU16Z128rrk:
case X86::VMOVDQU16Z256rrk:
case X86::VMOVDQU16Zrrk:
case X86::VMOVDQU32Z128rrk: case X86::VMOVDQA32Z128rrk:
case X86::VMOVDQU32Z256rrk: case X86::VMOVDQA32Z256rrk:
case X86::VMOVDQU32Zrrk: case X86::VMOVDQA32Zrrk:
case X86::VMOVDQU64Z128rrk: case X86::VMOVDQA64Z128rrk:
case X86::VMOVDQU64Z256rrk: case X86::VMOVDQA64Z256rrk:
case X86::VMOVDQU64Zrrk: case X86::VMOVDQA64Zrrk:
case X86::VMOVUPDZ128rrk: case X86::VMOVAPDZ128rrk:
case X86::VMOVUPDZ256rrk: case X86::VMOVAPDZ256rrk:
case X86::VMOVUPDZrrk: case X86::VMOVAPDZrrk:
case X86::VMOVUPSZ128rrk: case X86::VMOVAPSZ128rrk:
case X86::VMOVUPSZ256rrk: case X86::VMOVAPSZ256rrk:
case X86::VMOVUPSZrrk: case X86::VMOVAPSZrrk: {
unsigned Opc;
switch (MIOpc) {
default: llvm_unreachable("Unreachable!");
case X86::VMOVDQU8Z128rrk: Opc = X86::VPBLENDMBZ128rrk; break;
case X86::VMOVDQU8Z256rrk: Opc = X86::VPBLENDMBZ256rrk; break;
case X86::VMOVDQU8Zrrk: Opc = X86::VPBLENDMBZrrk; break;
case X86::VMOVDQU16Z128rrk: Opc = X86::VPBLENDMWZ128rrk; break;
case X86::VMOVDQU16Z256rrk: Opc = X86::VPBLENDMWZ256rrk; break;
case X86::VMOVDQU16Zrrk: Opc = X86::VPBLENDMWZrrk; break;
case X86::VMOVDQU32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
case X86::VMOVDQU32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
case X86::VMOVDQU32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
case X86::VMOVDQU64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
case X86::VMOVDQU64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
case X86::VMOVDQU64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
case X86::VMOVUPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
case X86::VMOVUPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
case X86::VMOVUPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
case X86::VMOVUPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
case X86::VMOVUPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
case X86::VMOVUPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
case X86::VMOVDQA32Z128rrk: Opc = X86::VPBLENDMDZ128rrk; break;
case X86::VMOVDQA32Z256rrk: Opc = X86::VPBLENDMDZ256rrk; break;
case X86::VMOVDQA32Zrrk: Opc = X86::VPBLENDMDZrrk; break;
case X86::VMOVDQA64Z128rrk: Opc = X86::VPBLENDMQZ128rrk; break;
case X86::VMOVDQA64Z256rrk: Opc = X86::VPBLENDMQZ256rrk; break;
case X86::VMOVDQA64Zrrk: Opc = X86::VPBLENDMQZrrk; break;
case X86::VMOVAPDZ128rrk: Opc = X86::VBLENDMPDZ128rrk; break;
case X86::VMOVAPDZ256rrk: Opc = X86::VBLENDMPDZ256rrk; break;
case X86::VMOVAPDZrrk: Opc = X86::VBLENDMPDZrrk; break;
case X86::VMOVAPSZ128rrk: Opc = X86::VBLENDMPSZ128rrk; break;
case X86::VMOVAPSZ256rrk: Opc = X86::VBLENDMPSZ256rrk; break;
case X86::VMOVAPSZrrk: Opc = X86::VBLENDMPSZrrk; break;
}
NewMI = BuildMI(MF, MI.getDebugLoc(), get(Opc))
.add(Dest)
.add(MI.getOperand(2))
.add(Src)
.add(MI.getOperand(3));
break;
}
}
if (!NewMI) return nullptr;
if (LV) { // Update live variables
if (Src.isKill())
LV->replaceKillInstruction(Src.getReg(), MI, *NewMI);
if (Dest.isDead())
LV->replaceKillInstruction(Dest.getReg(), MI, *NewMI);
}
MFI->insert(MI.getIterator(), NewMI); // Insert the new inst
return NewMI;
}
/// This determines which of the three possible cases of a three-source
/// commute the source indexes correspond to, taking into account any mask
/// operands. Commuting a passthru operand is never allowed; any other
/// combination of indices is a programming error.
/// Case 0 - Possible to commute the first and second operands.
/// Case 1 - Possible to commute the first and third operands.
/// Case 2 - Possible to commute the second and third operands.
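/// For example, for a merge-masked FMA such as
///   VFMADD213PSZrk dst, src1, kmask, src2, src3
/// the vector sources are at operand indices 1, 3 and 4, so commuting
/// indices 3 and 4 is Case 2.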
static unsigned getThreeSrcCommuteCase(uint64_t TSFlags, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) {
// Put the lowest index into SrcOpIdx1 to simplify the checks below.
if (SrcOpIdx1 > SrcOpIdx2)
std::swap(SrcOpIdx1, SrcOpIdx2);
unsigned Op1 = 1, Op2 = 2, Op3 = 3;
if (X86II::isKMasked(TSFlags)) {
Op2++;
Op3++;
}
if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op2)
return 0;
if (SrcOpIdx1 == Op1 && SrcOpIdx2 == Op3)
return 1;
if (SrcOpIdx1 == Op2 && SrcOpIdx2 == Op3)
return 2;
llvm_unreachable("Unknown three src commute case.");
}
unsigned X86InstrInfo::getFMA3OpcodeToCommuteOperands(
const MachineInstr &MI, unsigned SrcOpIdx1, unsigned SrcOpIdx2,
const X86InstrFMA3Group &FMA3Group) const {
unsigned Opc = MI.getOpcode();
// TODO: Commuting the 1st operand of FMA*_Int requires some additional
// analysis. The commute optimization is legal only if all users of FMA*_Int
// use only the lowest element of the FMA*_Int instruction. Such analysis is
// not implemented yet, so commuting operand 1 of an intrinsic form is simply
// not allowed (see the assert below). When such analysis becomes available,
// this will be the right place to call it.
assert(!(FMA3Group.isIntrinsic() && (SrcOpIdx1 == 1 || SrcOpIdx2 == 1)) &&
"Intrinsic instructions can't commute operand 1");
// Determine which case this commute is or if it can't be done.
unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
SrcOpIdx2);
assert(Case < 3 && "Unexpected case number!");
// Define the FMA forms mapping array that maps the input FMA form to the
// output FMA form that preserves the operation's semantics after the
// operands are commuted.
const unsigned Form132Index = 0;
const unsigned Form213Index = 1;
const unsigned Form231Index = 2;
static const unsigned FormMapping[][3] = {
// 0: SrcOpIdx1 == 1 && SrcOpIdx2 == 2;
// FMA132 A, C, b; ==> FMA231 C, A, b;
// FMA213 B, A, c; ==> FMA213 A, B, c;
// FMA231 C, A, b; ==> FMA132 A, C, b;
{ Form231Index, Form213Index, Form132Index },
// 1: SrcOpIdx1 == 1 && SrcOpIdx2 == 3;
// FMA132 A, c, B; ==> FMA132 B, c, A;
// FMA213 B, a, C; ==> FMA231 C, a, B;
// FMA231 C, a, B; ==> FMA213 B, a, C;
{ Form132Index, Form231Index, Form213Index },
// 2: SrcOpIdx1 == 2 && SrcOpIdx2 == 3;
// FMA132 a, C, B; ==> FMA213 a, B, C;
// FMA213 b, A, C; ==> FMA132 b, C, A;
// FMA231 c, A, B; ==> FMA231 c, B, A;
{ Form213Index, Form132Index, Form231Index }
};
unsigned FMAForms[3];
FMAForms[0] = FMA3Group.get132Opcode();
FMAForms[1] = FMA3Group.get213Opcode();
FMAForms[2] = FMA3Group.get231Opcode();
unsigned FormIndex;
for (FormIndex = 0; FormIndex < 3; FormIndex++)
if (Opc == FMAForms[FormIndex])
break;
// Everything is ready, just adjust the FMA opcode and return it.
FormIndex = FormMapping[Case][FormIndex];
return FMAForms[FormIndex];
}
static void commuteVPTERNLOG(MachineInstr &MI, unsigned SrcOpIdx1,
unsigned SrcOpIdx2) {
// Determine which case this commute is or if it can't be done.
unsigned Case = getThreeSrcCommuteCase(MI.getDesc().TSFlags, SrcOpIdx1,
SrcOpIdx2);
assert(Case < 3 && "Unexpected case value!");
// For each case we need to swap two pairs of bits in the final immediate.
static const uint8_t SwapMasks[3][4] = {
{ 0x04, 0x10, 0x08, 0x20 }, // Swap bits 2/4 and 3/5.
{ 0x02, 0x10, 0x08, 0x40 }, // Swap bits 1/4 and 3/6.
{ 0x02, 0x04, 0x20, 0x40 }, // Swap bits 1/2 and 5/6.
};
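// For example, commuting the first two sources (Case 0) of a VPTERNLOG with
// Imm = 0xCA (src1 ? src2 : src3) swaps bits 2/4 and 3/5, giving
// Imm = 0xE2 (src2 ? src1 : src3).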
uint8_t Imm = MI.getOperand(MI.getNumOperands()-1).getImm();
// Clear out the bits we are swapping.
uint8_t NewImm = Imm & ~(SwapMasks[Case][0] | SwapMasks[Case][1] |
SwapMasks[Case][2] | SwapMasks[Case][3]);
// If the immediate had a bit of the pair set, then set the opposite bit.
if (Imm & SwapMasks[Case][0]) NewImm |= SwapMasks[Case][1];
if (Imm & SwapMasks[Case][1]) NewImm |= SwapMasks[Case][0];
if (Imm & SwapMasks[Case][2]) NewImm |= SwapMasks[Case][3];
if (Imm & SwapMasks[Case][3]) NewImm |= SwapMasks[Case][2];
MI.getOperand(MI.getNumOperands()-1).setImm(NewImm);
}
// Returns true if this is a VPERMI2 or VPERMT2 instruction that can be
// commuted.
static bool isCommutableVPERMV3Instruction(unsigned Opcode) {
#define VPERM_CASES(Suffix) \
case X86::VPERMI2##Suffix##128rr: case X86::VPERMT2##Suffix##128rr: \
case X86::VPERMI2##Suffix##256rr: case X86::VPERMT2##Suffix##256rr: \
case X86::VPERMI2##Suffix##rr: case X86::VPERMT2##Suffix##rr: \
case X86::VPERMI2##Suffix##128rm: case X86::VPERMT2##Suffix##128rm: \
case X86::VPERMI2##Suffix##256rm: case X86::VPERMT2##Suffix##256rm: \
case X86::VPERMI2##Suffix##rm: case X86::VPERMT2##Suffix##rm: \
case X86::VPERMI2##Suffix##128rrkz: case X86::VPERMT2##Suffix##128rrkz: \
case X86::VPERMI2##Suffix##256rrkz: case X86::VPERMT2##Suffix##256rrkz: \
case X86::VPERMI2##Suffix##rrkz: case X86::VPERMT2##Suffix##rrkz: \
case X86::VPERMI2##Suffix##128rmkz: case X86::VPERMT2##Suffix##128rmkz: \
case X86::VPERMI2##Suffix##256rmkz: case X86::VPERMT2##Suffix##256rmkz: \
case X86::VPERMI2##Suffix##rmkz: case X86::VPERMT2##Suffix##rmkz:
#define VPERM_CASES_BROADCAST(Suffix) \
VPERM_CASES(Suffix) \
case X86::VPERMI2##Suffix##128rmb: case X86::VPERMT2##Suffix##128rmb: \
case X86::VPERMI2##Suffix##256rmb: case X86::VPERMT2##Suffix##256rmb: \
case X86::VPERMI2##Suffix##rmb: case X86::VPERMT2##Suffix##rmb: \
case X86::VPERMI2##Suffix##128rmbkz: case X86::VPERMT2##Suffix##128rmbkz: \
case X86::VPERMI2##Suffix##256rmbkz: case X86::VPERMT2##Suffix##256rmbkz: \
case X86::VPERMI2##Suffix##rmbkz: case X86::VPERMT2##Suffix##rmbkz:
switch (Opcode) {
default: return false;
VPERM_CASES(B)
VPERM_CASES_BROADCAST(D)
VPERM_CASES_BROADCAST(PD)
VPERM_CASES_BROADCAST(PS)
VPERM_CASES_BROADCAST(Q)
VPERM_CASES(W)
return true;
}
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}
// Returns commuted opcode for VPERMI2 and VPERMT2 instructions by switching
// from the I opcode to the T opcode and vice versa.
static unsigned getCommutedVPERMV3Opcode(unsigned Opcode) {
#define VPERM_CASES(Orig, New) \
case X86::Orig##128rr: return X86::New##128rr; \
case X86::Orig##128rrkz: return X86::New##128rrkz; \
case X86::Orig##128rm: return X86::New##128rm; \
case X86::Orig##128rmkz: return X86::New##128rmkz; \
case X86::Orig##256rr: return X86::New##256rr; \
case X86::Orig##256rrkz: return X86::New##256rrkz; \
case X86::Orig##256rm: return X86::New##256rm; \
case X86::Orig##256rmkz: return X86::New##256rmkz; \
case X86::Orig##rr: return X86::New##rr; \
case X86::Orig##rrkz: return X86::New##rrkz; \
case X86::Orig##rm: return X86::New##rm; \
case X86::Orig##rmkz: return X86::New##rmkz;
#define VPERM_CASES_BROADCAST(Orig, New) \
VPERM_CASES(Orig, New) \
case X86::Orig##128rmb: return X86::New##128rmb; \
case X86::Orig##128rmbkz: return X86::New##128rmbkz; \
case X86::Orig##256rmb: return X86::New##256rmb; \
case X86::Orig##256rmbkz: return X86::New##256rmbkz; \
case X86::Orig##rmb: return X86::New##rmb; \
case X86::Orig##rmbkz: return X86::New##rmbkz;
switch (Opcode) {
VPERM_CASES(VPERMI2B, VPERMT2B)
VPERM_CASES_BROADCAST(VPERMI2D, VPERMT2D)
VPERM_CASES_BROADCAST(VPERMI2PD, VPERMT2PD)
VPERM_CASES_BROADCAST(VPERMI2PS, VPERMT2PS)
VPERM_CASES_BROADCAST(VPERMI2Q, VPERMT2Q)
VPERM_CASES(VPERMI2W, VPERMT2W)
VPERM_CASES(VPERMT2B, VPERMI2B)
VPERM_CASES_BROADCAST(VPERMT2D, VPERMI2D)
VPERM_CASES_BROADCAST(VPERMT2PD, VPERMI2PD)
VPERM_CASES_BROADCAST(VPERMT2PS, VPERMI2PS)
VPERM_CASES_BROADCAST(VPERMT2Q, VPERMI2Q)
VPERM_CASES(VPERMT2W, VPERMI2W)
}
llvm_unreachable("Unreachable!");
#undef VPERM_CASES_BROADCAST
#undef VPERM_CASES
}
MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
unsigned OpIdx1,
unsigned OpIdx2) const {
auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
if (NewMI)
return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
return MI;
};
switch (MI.getOpcode()) {
case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
unsigned Opc;
unsigned Size;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
}
unsigned Amt = MI.getOperand(3).getImm();
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
WorkingMI.getOperand(3).setImm(Size - Amt);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::PFSUBrr:
case X86::PFSUBRrr: {
// PFSUB x, y: x = x - y
// PFSUBR x, y: x = y - x
unsigned Opc =
(X86::PFSUBRrr == MI.getOpcode() ? X86::PFSUBrr : X86::PFSUBRrr);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::BLENDPDrri:
case X86::BLENDPSrri:
case X86::VBLENDPDrri:
case X86::VBLENDPSrri:
// If we're optimizing for size, try to use MOVSD/MOVSS.
if (MI.getParent()->getParent()->getFunction().optForSize()) {
unsigned Mask, Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::BLENDPDrri: Opc = X86::MOVSDrr; Mask = 0x03; break;
case X86::BLENDPSrri: Opc = X86::MOVSSrr; Mask = 0x0F; break;
case X86::VBLENDPDrri: Opc = X86::VMOVSDrr; Mask = 0x03; break;
case X86::VBLENDPSrri: Opc = X86::VMOVSSrr; Mask = 0x0F; break;
}
if ((MI.getOperand(3).getImm() ^ Mask) == 1) {
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
WorkingMI.RemoveOperand(3);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
/*NewMI=*/false,
OpIdx1, OpIdx2);
}
}
LLVM_FALLTHROUGH;
case X86::PBLENDWrri:
case X86::VBLENDPDYrri:
case X86::VBLENDPSYrri:
case X86::VPBLENDDrri:
case X86::VPBLENDWrri:
case X86::VPBLENDDYrri:
case X86::VPBLENDWYrri:{
unsigned Mask;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::BLENDPDrri: Mask = 0x03; break;
case X86::BLENDPSrri: Mask = 0x0F; break;
case X86::PBLENDWrri: Mask = 0xFF; break;
case X86::VBLENDPDrri: Mask = 0x03; break;
case X86::VBLENDPSrri: Mask = 0x0F; break;
case X86::VBLENDPDYrri: Mask = 0x0F; break;
case X86::VBLENDPSYrri: Mask = 0xFF; break;
case X86::VPBLENDDrri: Mask = 0x0F; break;
case X86::VPBLENDWrri: Mask = 0xFF; break;
case X86::VPBLENDDYrri: Mask = 0xFF; break;
case X86::VPBLENDWYrri: Mask = 0xFF; break;
}
// Only the least significant bits of Imm are used.
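// Commuting the sources simply inverts the selection within the used bits;
// e.g. BLENDPSrri with Imm = 0x05 becomes Imm = 0x0A (0x0F ^ 0x05).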
unsigned Imm = MI.getOperand(3).getImm() & Mask;
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm(Mask ^ Imm);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::MOVSDrr:
case X86::MOVSSrr:
case X86::VMOVSDrr:
case X86::VMOVSSrr:{
// On SSE41 or later we can commute a MOVSS/MOVSD to a BLENDPS/BLENDPD.
assert(Subtarget.hasSSE41() && "Commuting MOVSD/MOVSS requires SSE41!");
unsigned Mask, Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::MOVSDrr: Opc = X86::BLENDPDrri; Mask = 0x02; break;
case X86::MOVSSrr: Opc = X86::BLENDPSrri; Mask = 0x0E; break;
case X86::VMOVSDrr: Opc = X86::VBLENDPDrri; Mask = 0x02; break;
case X86::VMOVSSrr: Opc = X86::VBLENDPSrri; Mask = 0x0E; break;
}
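// The blend immediates are chosen for the operand order after the commute:
// e.g. MOVSDrr dst, a, b computes dst = {b[0], a[1]}, and the commuted form
// BLENDPDrri dst, b, a with Imm = 0x02 selects element 0 from the first
// source and element 1 from the second, giving the same result.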
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
WorkingMI.addOperand(MachineOperand::CreateImm(Mask));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::PCLMULQDQrr:
case X86::VPCLMULQDQrr:
case X86::VPCLMULQDQYrr:
case X86::VPCLMULQDQZrr:
case X86::VPCLMULQDQZ128rr:
case X86::VPCLMULQDQZ256rr: {
// SRC1 64bits = Imm[0] ? SRC1[127:64] : SRC1[63:0]
// SRC2 64bits = Imm[4] ? SRC2[127:64] : SRC2[63:0]
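// Commuting the sources exchanges the roles of bit 0 and bit 4; e.g. an
// immediate of 0x01 (high half of SRC1, low half of SRC2) becomes 0x10.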
unsigned Imm = MI.getOperand(3).getImm();
unsigned Src1Hi = Imm & 0x01;
unsigned Src2Hi = Imm & 0x10;
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm((Src1Hi << 4) | (Src2Hi >> 4));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPCMPBZ128rri: case X86::VPCMPUBZ128rri:
case X86::VPCMPBZ256rri: case X86::VPCMPUBZ256rri:
case X86::VPCMPBZrri: case X86::VPCMPUBZrri:
case X86::VPCMPDZ128rri: case X86::VPCMPUDZ128rri:
case X86::VPCMPDZ256rri: case X86::VPCMPUDZ256rri:
case X86::VPCMPDZrri: case X86::VPCMPUDZrri:
case X86::VPCMPQZ128rri: case X86::VPCMPUQZ128rri:
case X86::VPCMPQZ256rri: case X86::VPCMPUQZ256rri:
case X86::VPCMPQZrri: case X86::VPCMPUQZrri:
case X86::VPCMPWZ128rri: case X86::VPCMPUWZ128rri:
case X86::VPCMPWZ256rri: case X86::VPCMPUWZ256rri:
case X86::VPCMPWZrri: case X86::VPCMPUWZrri:
case X86::VPCMPBZ128rrik: case X86::VPCMPUBZ128rrik:
case X86::VPCMPBZ256rrik: case X86::VPCMPUBZ256rrik:
case X86::VPCMPBZrrik: case X86::VPCMPUBZrrik:
case X86::VPCMPDZ128rrik: case X86::VPCMPUDZ128rrik:
case X86::VPCMPDZ256rrik: case X86::VPCMPUDZ256rrik:
case X86::VPCMPDZrrik: case X86::VPCMPUDZrrik:
case X86::VPCMPQZ128rrik: case X86::VPCMPUQZ128rrik:
case X86::VPCMPQZ256rrik: case X86::VPCMPUQZ256rrik:
case X86::VPCMPQZrrik: case X86::VPCMPUQZrrik:
case X86::VPCMPWZ128rrik: case X86::VPCMPUWZ128rrik:
case X86::VPCMPWZ256rrik: case X86::VPCMPUWZ256rrik:
case X86::VPCMPWZrrik: case X86::VPCMPUWZrrik: {
// Flip comparison mode immediate (if necessary).
unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm() & 0x7;
Imm = X86::getSwappedVPCMPImm(Imm);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(MI.getNumOperands() - 1).setImm(Imm);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPCOMBri: case X86::VPCOMUBri:
case X86::VPCOMDri: case X86::VPCOMUDri:
case X86::VPCOMQri: case X86::VPCOMUQri:
case X86::VPCOMWri: case X86::VPCOMUWri: {
// Flip comparison mode immediate (if necessary).
unsigned Imm = MI.getOperand(3).getImm() & 0x7;
Imm = X86::getSwappedVPCOMImm(Imm);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm(Imm);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPERM2F128rr:
case X86::VPERM2I128rr: {
// Flip permute source immediate.
// Imm & 0x02: lo = if set, select Op1.lo/hi else Op0.lo/hi.
// Imm & 0x20: hi = if set, select Op1.lo/hi else Op0.lo/hi.
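// XORing with 0x22 flips both source-select bits to compensate for the
// operand swap; e.g. Imm = 0x20 (lo from Op0, hi from Op1) becomes 0x02.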
unsigned Imm = MI.getOperand(3).getImm() & 0xFF;
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.getOperand(3).setImm(Imm ^ 0x22);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::MOVHLPSrr:
case X86::UNPCKHPDrr:
case X86::VMOVHLPSrr:
case X86::VUNPCKHPDrr:
case X86::VMOVHLPSZrr:
case X86::VUNPCKHPDZ128rr: {
assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");
unsigned Opc = MI.getOpcode();
switch (Opc) {
default: llvm_unreachable("Unreachable!");
case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break;
case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break;
case X86::VMOVHLPSrr: Opc = X86::VUNPCKHPDrr; break;
case X86::VUNPCKHPDrr: Opc = X86::VMOVHLPSrr; break;
case X86::VMOVHLPSZrr: Opc = X86::VUNPCKHPDZ128rr; break;
case X86::VUNPCKHPDZ128rr: Opc = X86::VMOVHLPSZrr; break;
}
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr:
case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr:
case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr:
case X86::CMOVA16rr: case X86::CMOVA32rr: case X86::CMOVA64rr:
case X86::CMOVL16rr: case X86::CMOVL32rr: case X86::CMOVL64rr:
case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
case X86::CMOVG16rr: case X86::CMOVG32rr: case X86::CMOVG64rr:
case X86::CMOVS16rr: case X86::CMOVS32rr: case X86::CMOVS64rr:
case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
case X86::CMOVP16rr: case X86::CMOVP32rr: case X86::CMOVP64rr:
case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr:
case X86::CMOVO16rr: case X86::CMOVO32rr: case X86::CMOVO64rr:
case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: {
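// Commuting a CMOV exchanges its two data sources, so the result is
// preserved by inverting the condition; e.g. CMOVA dst, a, b
// (dst = above ? b : a) becomes CMOVBE dst, b, a.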
unsigned Opc;
switch (MI.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break;
case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break;
case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break;
case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break;
case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break;
case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break;
case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break;
case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break;
case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break;
case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break;
case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break;
case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break;
case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break;
case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break;
case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
}
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
case X86::VPTERNLOGDZrrik:
case X86::VPTERNLOGDZ128rrik:
case X86::VPTERNLOGDZ256rrik:
case X86::VPTERNLOGQZrrik:
case X86::VPTERNLOGQZ128rrik:
case X86::VPTERNLOGQZ256rrik:
case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
case X86::VPTERNLOGDZ128rmbi:
case X86::VPTERNLOGDZ256rmbi:
case X86::VPTERNLOGDZrmbi:
case X86::VPTERNLOGQZ128rmbi:
case X86::VPTERNLOGQZ256rmbi:
case X86::VPTERNLOGQZrmbi:
case X86::VPTERNLOGDZ128rmbikz:
case X86::VPTERNLOGDZ256rmbikz:
case X86::VPTERNLOGDZrmbikz:
case X86::VPTERNLOGQZ128rmbikz:
case X86::VPTERNLOGQZ256rmbikz:
case X86::VPTERNLOGQZrmbikz: {
auto &WorkingMI = cloneIfNew(MI);
commuteVPTERNLOG(WorkingMI, OpIdx1, OpIdx2);
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
default: {
if (isCommutableVPERMV3Instruction(MI.getOpcode())) {
unsigned Opc = getCommutedVPERMV3Opcode(MI.getOpcode());
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
MI.getDesc().TSFlags);
if (FMA3Group) {
unsigned Opc =
getFMA3OpcodeToCommuteOperands(MI, OpIdx1, OpIdx2, *FMA3Group);
auto &WorkingMI = cloneIfNew(MI);
WorkingMI.setDesc(get(Opc));
return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
OpIdx1, OpIdx2);
}
return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
}
}
bool
X86InstrInfo::findThreeSrcCommutedOpIndices(const MachineInstr &MI,
unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2,
bool IsIntrinsic) const {
uint64_t TSFlags = MI.getDesc().TSFlags;
unsigned FirstCommutableVecOp = 1;
unsigned LastCommutableVecOp = 3;
unsigned KMaskOp = -1U;
if (X86II::isKMasked(TSFlags)) {
// For k-zero-masked operations it is OK to commute the first vector
// operand.
// For merge-masked operations we make the conservative choice of not
// commuting it, because the elements of the first vector operand for
// which the corresponding bit in the k-mask operand is 0 are copied
// unchanged to the result of the instruction.
// TODO/FIXME: The commute may still be legal if it is known that the
// k-mask operand is set to either all ones or all zeroes.
// It is also OK to commute the 1st operand if all users of MI use only
// the elements enabled by the k-mask operand. For example,
//   v4 = VFMADD213PSZrk v1, k, v2, v3; // v1[i] = k[i] ? v2[i]*v1[i]+v3[i]
//                                      //        : v1[i];
//   VMOVAPSZmrk <mem_addr>, k, v4;     // this is the ONLY user of v4 ->
//                                      // OK to commute v1 in FMADD213PSZrk.
// The k-mask operand has index = 2 for masked and zero-masked operations.
KMaskOp = 2;
// The operand with index = 1 is used as a source for those elements for
// which the corresponding bit in the k-mask is set to 0.
if (X86II::isKMergeMasked(TSFlags))
FirstCommutableVecOp = 3;
LastCommutableVecOp++;
} else if (IsIntrinsic) {
// Commuting the first operand of an intrinsic instruction isn't possible
// unless we can prove that only the lowest element of the result is used.
FirstCommutableVecOp = 2;
}
if (isMem(MI, LastCommutableVecOp))
LastCommutableVecOp--;
// Only operands in the range [FirstCommutableVecOp, LastCommutableVecOp],
// excluding the k-mask operand, are commutable.
// Also, the value 'CommuteAnyOperandIndex' is valid here, as it means
// that the operand is not specified/fixed.
if (SrcOpIdx1 != CommuteAnyOperandIndex &&
(SrcOpIdx1 < FirstCommutableVecOp || SrcOpIdx1 > LastCommutableVecOp ||
SrcOpIdx1 == KMaskOp))
return false;
if (SrcOpIdx2 != CommuteAnyOperandIndex &&
(SrcOpIdx2 < FirstCommutableVecOp || SrcOpIdx2 > LastCommutableVecOp ||
SrcOpIdx2 == KMaskOp))
return false;
// Look for two different register operands assumed to be commutable
// regardless of the instruction opcode; the opcode is adjusted later
// when necessary (e.g. for FMA3 forms).
if (SrcOpIdx1 == CommuteAnyOperandIndex ||
SrcOpIdx2 == CommuteAnyOperandIndex) {
unsigned CommutableOpIdx1 = SrcOpIdx1;
unsigned CommutableOpIdx2 = SrcOpIdx2;
// At least one of the operands to be commuted is not specified and
// this method is free to choose appropriate commutable operands.
if (SrcOpIdx1 == SrcOpIdx2)
// Neither operand is fixed. By default, set one of the commutable
// operands to the last register operand of the instruction.
CommutableOpIdx2 = LastCommutableVecOp;
else if (SrcOpIdx2 == CommuteAnyOperandIndex)
// Only one of operands is not fixed.
CommutableOpIdx2 = SrcOpIdx1;
// CommutableOpIdx2 is well defined now. Let's choose another commutable
// operand and assign its index to CommutableOpIdx1.
unsigned Op2Reg = MI.getOperand(CommutableOpIdx2).getReg();
for (CommutableOpIdx1 = LastCommutableVecOp;
CommutableOpIdx1 >= FirstCommutableVecOp; CommutableOpIdx1--) {
// Just ignore and skip the k-mask operand.
if (CommutableOpIdx1 == KMaskOp)
continue;
// The commuted operands must have different registers.
// Otherwise, the commute transformation does not change anything and
// is therefore useless.
if (Op2Reg != MI.getOperand(CommutableOpIdx1).getReg())
break;
}
// No appropriate commutable operands were found.
if (CommutableOpIdx1 < FirstCommutableVecOp)
return false;
// Assign the found pair of commutable indices to SrcOpIdx1 and SrcOpIdx2
// to return those values.
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
CommutableOpIdx1, CommutableOpIdx2))
return false;
}
return true;
}
bool X86InstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx1,
unsigned &SrcOpIdx2) const {
const MCInstrDesc &Desc = MI.getDesc();
if (!Desc.isCommutable())
return false;
switch (MI.getOpcode()) {
case X86::CMPSDrr:
case X86::CMPSSrr:
case X86::CMPPDrri:
case X86::CMPPSrri:
case X86::VCMPSDrr:
case X86::VCMPSSrr:
case X86::VCMPPDrri:
case X86::VCMPPSrri:
case X86::VCMPPDYrri:
case X86::VCMPPSYrri:
case X86::VCMPSDZrr:
case X86::VCMPSSZrr:
case X86::VCMPPDZrri:
case X86::VCMPPSZrri:
case X86::VCMPPDZ128rri:
case X86::VCMPPSZ128rri:
case X86::VCMPPDZ256rri:
case X86::VCMPPSZ256rri: {
// Float comparisons can be safely commuted for
// Ordered/Unordered/Equal/NotEqual tests.
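// These four predicates are symmetric in their operands, so the immediate
// does not need to change.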
unsigned Imm = MI.getOperand(3).getImm() & 0x7;
switch (Imm) {
case 0x00: // EQUAL
case 0x03: // UNORDERED
case 0x04: // NOT EQUAL
case 0x07: // ORDERED
// The indices of the commutable operands are 1 and 2.
// Assign them to the returned operand indices here.
return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
}
return false;
}
case X86::MOVSDrr:
case X86::MOVSSrr:
case X86::VMOVSDrr:
case X86::VMOVSSrr:
if (Subtarget.hasSSE41())
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
return false;
case X86::MOVHLPSrr:
case X86::UNPCKHPDrr:
case X86::VMOVHLPSrr:
case X86::VUNPCKHPDrr:
case X86::VMOVHLPSZrr:
case X86::VUNPCKHPDZ128rr:
if (Subtarget.hasSSE2())
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
return false;
case X86::VPTERNLOGDZrri: case X86::VPTERNLOGDZrmi:
case X86::VPTERNLOGDZ128rri: case X86::VPTERNLOGDZ128rmi:
case X86::VPTERNLOGDZ256rri: case X86::VPTERNLOGDZ256rmi:
case X86::VPTERNLOGQZrri: case X86::VPTERNLOGQZrmi:
case X86::VPTERNLOGQZ128rri: case X86::VPTERNLOGQZ128rmi:
case X86::VPTERNLOGQZ256rri: case X86::VPTERNLOGQZ256rmi:
case X86::VPTERNLOGDZrrik:
case X86::VPTERNLOGDZ128rrik:
case X86::VPTERNLOGDZ256rrik:
case X86::VPTERNLOGQZrrik:
case X86::VPTERNLOGQZ128rrik:
case X86::VPTERNLOGQZ256rrik:
case X86::VPTERNLOGDZrrikz: case X86::VPTERNLOGDZrmikz:
case X86::VPTERNLOGDZ128rrikz: case X86::VPTERNLOGDZ128rmikz:
case X86::VPTERNLOGDZ256rrikz: case X86::VPTERNLOGDZ256rmikz:
case X86::VPTERNLOGQZrrikz: case X86::VPTERNLOGQZrmikz:
case X86::VPTERNLOGQZ128rrikz: case X86::VPTERNLOGQZ128rmikz:
case X86::VPTERNLOGQZ256rrikz: case X86::VPTERNLOGQZ256rmikz:
case X86::VPTERNLOGDZ128rmbi:
case X86::VPTERNLOGDZ256rmbi:
case X86::VPTERNLOGDZrmbi:
case X86::VPTERNLOGQZ128rmbi:
case X86::VPTERNLOGQZ256rmbi:
case X86::VPTERNLOGQZrmbi:
case X86::VPTERNLOGDZ128rmbikz:
case X86::VPTERNLOGDZ256rmbikz:
case X86::VPTERNLOGDZrmbikz:
case X86::VPTERNLOGQZ128rmbikz:
case X86::VPTERNLOGQZ256rmbikz:
case X86::VPTERNLOGQZrmbikz:
return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
case X86::VPMADD52HUQZ128r:
case X86::VPMADD52HUQZ128rk:
case X86::VPMADD52HUQZ128rkz:
case X86::VPMADD52HUQZ256r:
case X86::VPMADD52HUQZ256rk:
case X86::VPMADD52HUQZ256rkz:
case X86::VPMADD52HUQZr:
case X86::VPMADD52HUQZrk:
case X86::VPMADD52HUQZrkz:
case X86::VPMADD52LUQZ128r:
case X86::VPMADD52LUQZ128rk:
case X86::VPMADD52LUQZ128rkz:
case X86::VPMADD52LUQZ256r:
case X86::VPMADD52LUQZ256rk:
case X86::VPMADD52LUQZ256rkz:
case X86::VPMADD52LUQZr:
case X86::VPMADD52LUQZrk:
case X86::VPMADD52LUQZrkz: {
unsigned CommutableOpIdx1 = 2;
unsigned CommutableOpIdx2 = 3;
if (X86II::isKMasked(Desc.TSFlags)) {
// Skip the mask register.
++CommutableOpIdx1;
++CommutableOpIdx2;
}
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
CommutableOpIdx1, CommutableOpIdx2))
return false;
if (!MI.getOperand(SrcOpIdx1).isReg() ||
!MI.getOperand(SrcOpIdx2).isReg())
// Give up if either chosen operand is not a register.
return false;
return true;
}
default:
const X86InstrFMA3Group *FMA3Group = getFMA3Group(MI.getOpcode(),
MI.getDesc().TSFlags);
if (FMA3Group)
return findThreeSrcCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2,
FMA3Group->isIntrinsic());
// Handle masked instructions, since we need to skip over the mask input
// and the preserved input.
if (X86II::isKMasked(Desc.TSFlags)) {
// First assume that the first input is the mask operand and skip past it.
unsigned CommutableOpIdx1 = Desc.getNumDefs() + 1;
unsigned CommutableOpIdx2 = Desc.getNumDefs() + 2;
// Check if the first input is tied. If there isn't one, then we only
// need to skip the mask operand, which we did above.
if ((MI.getDesc().getOperandConstraint(Desc.getNumDefs(),
MCOI::TIED_TO) != -1)) {
// If this is a zero-masking instruction with a tied operand, we need to
// move the first index back to the first input, since this must be a
// 3-input instruction and we want the first two non-mask inputs.
// Otherwise this is a 2-input instruction with a preserved input and a
// mask, so we need to move the indices to skip one more input.
if (X86II::isKMergeMasked(Desc.TSFlags)) {
++CommutableOpIdx1;
++CommutableOpIdx2;
} else {
--CommutableOpIdx1;
}
}
if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
CommutableOpIdx1, CommutableOpIdx2))
return false;
if (!MI.getOperand(SrcOpIdx1).isReg() ||
!MI.getOperand(SrcOpIdx2).isReg())
// Give up if either chosen operand is not a register.
return false;
return true;
}
return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
return false;
}
X86::CondCode X86::getCondFromBranchOpc(unsigned BrOpc) {
switch (BrOpc) {
default: return X86::COND_INVALID;
case X86::JE_1: return X86::COND_E;
case X86::JNE_1: return X86::COND_NE;
case X86::JL_1: return X86::COND_L;
case X86::JLE_1: return X86::COND_LE;
case X86::JG_1: return X86::COND_G;
case X86::JGE_1: return X86::COND_GE;
case X86::JB_1: return X86::COND_B;
case X86::JBE_1: return X86::COND_BE;
case X86::JA_1: return X86::COND_A;
case X86::JAE_1: return X86::COND_AE;
case X86::JS_1: return X86::COND_S;
case X86::JNS_1: return X86::COND_NS;
case X86::JP_1: return X86::COND_P;
case X86::JNP_1: return X86::COND_NP;
case X86::JO_1: return X86::COND_O;
case X86::JNO_1: return X86::COND_NO;
}
}
/// Return condition code of a SET opcode.
X86::CondCode X86::getCondFromSETOpc(unsigned Opc) {
switch (Opc) {
default: return X86::COND_INVALID;
case X86::SETAr: case X86::SETAm: return X86::COND_A;
case X86::SETAEr: case X86::SETAEm: return X86::COND_AE;
case X86::SETBr: case X86::SETBm: return X86::COND_B;
case X86::SETBEr: case X86::SETBEm: return X86::COND_BE;
case X86::SETEr: case X86::SETEm: return X86::COND_E;
case X86::SETGr: case X86::SETGm: return X86::COND_G;
case X86::SETGEr: case X86::SETGEm: return X86::COND_GE;
case X86::SETLr: case X86::SETLm: return X86::COND_L;
case X86::SETLEr: case X86::SETLEm: return X86::COND_LE;
case X86::SETNEr: case X86::SETNEm: return X86::COND_NE;
case X86::SETNOr: case X86::SETNOm: return X86::COND_NO;
case X86::SETNPr: case X86::SETNPm: return X86::COND_NP;
case X86::SETNSr: case X86::SETNSm: return X86::COND_NS;
case X86::SETOr: case X86::SETOm: return X86::COND_O;
case X86::SETPr: case X86::SETPm: return X86::COND_P;
case X86::SETSr: case X86::SETSm: return X86::COND_S;
}
}
/// Return condition code of a CMov opcode.
X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) {
switch (Opc) {
default: return X86::COND_INVALID;
case X86::CMOVA16rm: case X86::CMOVA16rr: case X86::CMOVA32rm:
case X86::CMOVA32rr: case X86::CMOVA64rm: case X86::CMOVA64rr:
return X86::COND_A;
case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm:
case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr:
return X86::COND_AE;
case X86::CMOVB16rm: case X86::CMOVB16rr: case X86::CMOVB32rm:
case X86::CMOVB32rr: case X86::CMOVB64rm: case X86::CMOVB64rr:
return X86::COND_B;
case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm:
case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr:
return X86::COND_BE;
case X86::CMOVE16rm: case X86::CMOVE16rr: case X86::CMOVE32rm:
case X86::CMOVE32rr: case X86::CMOVE64rm: case X86::CMOVE64rr:
return X86::COND_E;
case X86::CMOVG16rm: case X86::CMOVG16rr: case X86::CMOVG32rm:
case X86::CMOVG32rr: case X86::CMOVG64rm: case X86::CMOVG64rr:
return X86::COND_G;
case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm:
case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr:
return X86::COND_GE;
case X86::CMOVL16rm: case X86::CMOVL16rr: case X86::CMOVL32rm:
case X86::CMOVL32rr: case X86::CMOVL64rm: case X86::CMOVL64rr:
return X86::COND_L;
case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm:
case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr:
return X86::COND_LE;
case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm:
case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr:
return X86::COND_NE;
case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm:
case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr:
return X86::COND_NO;
case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm:
case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr:
return X86::COND_NP;
case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm:
case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr:
return X86::COND_NS;
case X86::CMOVO16rm: case X86::CMOVO16rr: case X86::CMOVO32rm:
case X86::CMOVO32rr: case X86::CMOVO64rm: case X86::CMOVO64rr:
return X86::COND_O;
case X86::CMOVP16rm: case X86::CMOVP16rr: case X86::CMOVP32rm:
case X86::CMOVP32rr: case X86::CMOVP64rm: case X86::CMOVP64rr:
return X86::COND_P;
case X86::CMOVS16rm: case X86::CMOVS16rr: case X86::CMOVS32rm:
case X86::CMOVS32rr: case X86::CMOVS64rm: case X86::CMOVS64rr:
return X86::COND_S;
}
}
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Illegal condition code!");
case X86::COND_E: return X86::JE_1;
case X86::COND_NE: return X86::JNE_1;
case X86::COND_L: return X86::JL_1;
case X86::COND_LE: return X86::JLE_1;
case X86::COND_G: return X86::JG_1;
case X86::COND_GE: return X86::JGE_1;
case X86::COND_B: return X86::JB_1;
case X86::COND_BE: return X86::JBE_1;
case X86::COND_A: return X86::JA_1;
case X86::COND_AE: return X86::JAE_1;
case X86::COND_S: return X86::JS_1;
case X86::COND_NS: return X86::JNS_1;
case X86::COND_P: return X86::JP_1;
case X86::COND_NP: return X86::JNP_1;
case X86::COND_O: return X86::JO_1;
case X86::COND_NO: return X86::JNO_1;
}
}
/// Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Illegal condition code!");
case X86::COND_E: return X86::COND_NE;
case X86::COND_NE: return X86::COND_E;
case X86::COND_L: return X86::COND_GE;
case X86::COND_LE: return X86::COND_G;
case X86::COND_G: return X86::COND_LE;
case X86::COND_GE: return X86::COND_L;
case X86::COND_B: return X86::COND_AE;
case X86::COND_BE: return X86::COND_A;
case X86::COND_A: return X86::COND_BE;
case X86::COND_AE: return X86::COND_B;
case X86::COND_S: return X86::COND_NS;
case X86::COND_NS: return X86::COND_S;
case X86::COND_P: return X86::COND_NP;
case X86::COND_NP: return X86::COND_P;
case X86::COND_O: return X86::COND_NO;
case X86::COND_NO: return X86::COND_O;
case X86::COND_NE_OR_P: return X86::COND_E_AND_NP;
case X86::COND_E_AND_NP: return X86::COND_NE_OR_P;
}
}
/// Assuming the flags are set by MI(a,b), return the condition code if we
/// modify the instruction such that the flags are set by MI(b,a).
static X86::CondCode getSwappedCondition(X86::CondCode CC) {
switch (CC) {
default: return X86::COND_INVALID;
case X86::COND_E: return X86::COND_E;
case X86::COND_NE: return X86::COND_NE;
case X86::COND_L: return X86::COND_G;
case X86::COND_LE: return X86::COND_GE;
case X86::COND_G: return X86::COND_L;
case X86::COND_GE: return X86::COND_LE;
case X86::COND_B: return X86::COND_A;
case X86::COND_BE: return X86::COND_AE;
case X86::COND_A: return X86::COND_B;
case X86::COND_AE: return X86::COND_BE;
}
}
std::pair<X86::CondCode, bool>
X86::getX86ConditionCode(CmpInst::Predicate Predicate) {
X86::CondCode CC = X86::COND_INVALID;
bool NeedSwap = false;
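// NeedSwap indicates that the comparison operands must be exchanged before
// CC is used; e.g. FCMP_OLT a, b is handled as the COND_A test of (b, a).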
switch (Predicate) {
default: break;
// Floating-point Predicates
case CmpInst::FCMP_UEQ: CC = X86::COND_E; break;
case CmpInst::FCMP_OLT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OGT: CC = X86::COND_A; break;
case CmpInst::FCMP_OLE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_OGE: CC = X86::COND_AE; break;
case CmpInst::FCMP_UGT: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_ULT: CC = X86::COND_B; break;
case CmpInst::FCMP_UGE: NeedSwap = true; LLVM_FALLTHROUGH;
case CmpInst::FCMP_ULE: CC = X86::COND_BE; break;
case CmpInst::FCMP_ONE: CC = X86::COND_NE; break;
case CmpInst::FCMP_UNO: CC = X86::COND_P; break;
case CmpInst::FCMP_ORD: CC = X86::COND_NP; break;
case CmpInst::FCMP_OEQ: LLVM_FALLTHROUGH;
case CmpInst::FCMP_UNE: CC = X86::COND_INVALID; break;
// Integer Predicates
case CmpInst::ICMP_EQ: CC = X86::COND_E; break;
case CmpInst::ICMP_NE: CC = X86::COND_NE; break;
case CmpInst::ICMP_UGT: CC = X86::COND_A; break;
case CmpInst::ICMP_UGE: CC = X86::COND_AE; break;
case CmpInst::ICMP_ULT: CC = X86::COND_B; break;
case CmpInst::ICMP_ULE: CC = X86::COND_BE; break;
case CmpInst::ICMP_SGT: CC = X86::COND_G; break;
case CmpInst::ICMP_SGE: CC = X86::COND_GE; break;
case CmpInst::ICMP_SLT: CC = X86::COND_L; break;
case CmpInst::ICMP_SLE: CC = X86::COND_LE; break;
}
return std::make_pair(CC, NeedSwap);
}
/// Return a set opcode for the given condition and
/// whether it has a memory operand.
unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) {
static const uint16_t Opc[16][2] = {
{ X86::SETAr, X86::SETAm },
{ X86::SETAEr, X86::SETAEm },
{ X86::SETBr, X86::SETBm },
{ X86::SETBEr, X86::SETBEm },
{ X86::SETEr, X86::SETEm },
{ X86::SETGr, X86::SETGm },
{ X86::SETGEr, X86::SETGEm },
{ X86::SETLr, X86::SETLm },
{ X86::SETLEr, X86::SETLEm },
{ X86::SETNEr, X86::SETNEm },
{ X86::SETNOr, X86::SETNOm },
{ X86::SETNPr, X86::SETNPm },
{ X86::SETNSr, X86::SETNSm },
{ X86::SETOr, X86::SETOm },
{ X86::SETPr, X86::SETPm },
{ X86::SETSr, X86::SETSm }
};
assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes");
return Opc[CC][HasMemoryOperand ? 1 : 0];
}
/// Return a cmov opcode for the given condition,
/// register size in bytes, and operand type.
unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
bool HasMemoryOperand) {
static const uint16_t Opc[32][3] = {
{ X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr },
{ X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr },
{ X86::CMOVB16rr, X86::CMOVB32rr, X86::CMOVB64rr },
{ X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr },
{ X86::CMOVE16rr, X86::CMOVE32rr, X86::CMOVE64rr },
{ X86::CMOVG16rr, X86::CMOVG32rr, X86::CMOVG64rr },
{ X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr },
{ X86::CMOVL16rr, X86::CMOVL32rr, X86::CMOVL64rr },
{ X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr },
{ X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr },
{ X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr },
{ X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr },
{ X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr },
{ X86::CMOVO16rr, X86::CMOVO32rr, X86::CMOVO64rr },
{ X86::CMOVP16rr, X86::CMOVP32rr, X86::CMOVP64rr },
{ X86::CMOVS16rr, X86::CMOVS32rr, X86::CMOVS64rr },
{ X86::CMOVA16rm, X86::CMOVA32rm, X86::CMOVA64rm },
{ X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm },
{ X86::CMOVB16rm, X86::CMOVB32rm, X86::CMOVB64rm },
{ X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm },
{ X86::CMOVE16rm, X86::CMOVE32rm, X86::CMOVE64rm },
{ X86::CMOVG16rm, X86::CMOVG32rm, X86::CMOVG64rm },
{ X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm },
{ X86::CMOVL16rm, X86::CMOVL32rm, X86::CMOVL64rm },
{ X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm },
{ X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm },
{ X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm },
{ X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm },
{ X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm },
{ X86::CMOVO16rm, X86::CMOVO32rm, X86::CMOVO64rm },
{ X86::CMOVP16rm, X86::CMOVP32rm, X86::CMOVP64rm },
{ X86::CMOVS16rm, X86::CMOVS32rm, X86::CMOVS64rm }
};
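// E.g. for COND_E, 4-byte registers and no memory operand this selects
// X86::CMOVE32rr.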
assert(CC < 16 && "Can only handle standard cond codes");
unsigned Idx = HasMemoryOperand ? 16+CC : CC;
switch(RegBytes) {
default: llvm_unreachable("Illegal register size!");
case 2: return Opc[Idx][0];
case 4: return Opc[Idx][1];
case 8: return Opc[Idx][2];
}
}
/// Get the VPCMP immediate for the given condition.
unsigned X86::getVPCMPImmForCond(ISD::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Unexpected SETCC condition");
case ISD::SETNE: return 4;
case ISD::SETEQ: return 0;
case ISD::SETULT:
case ISD::SETLT: return 1;
case ISD::SETUGT:
case ISD::SETGT: return 6;
case ISD::SETUGE:
case ISD::SETGE: return 5;
case ISD::SETULE:
case ISD::SETLE: return 2;
}
}
/// Get the VPCMP immediate to use when the operands are swapped.
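/// E.g. a < b is equivalent to !(b <= a), so LT (0x01) maps to NLE (0x06)
/// when the operands are exchanged.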
unsigned X86::getSwappedVPCMPImm(unsigned Imm) {
switch (Imm) {
default: llvm_unreachable("Unreachable!");
case 0x01: Imm = 0x06; break; // LT -> NLE
case 0x02: Imm = 0x05; break; // LE -> NLT
case 0x05: Imm = 0x02; break; // NLT -> LE
case 0x06: Imm = 0x01; break; // NLE -> LT
case 0x00: // EQ
case 0x03: // FALSE
case 0x04: // NE
case 0x07: // TRUE
break;
}
return Imm;
}
/// Get the VPCOM immediate to use when the operands are swapped.
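/// E.g. a < b is equivalent to b > a, so LT (0x00) maps to GT (0x02) when
/// the operands are exchanged.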
unsigned X86::getSwappedVPCOMImm(unsigned Imm) {
switch (Imm) {
default: llvm_unreachable("Unreachable!");
case 0x00: Imm = 0x02; break; // LT -> GT
case 0x01: Imm = 0x03; break; // LE -> GE
case 0x02: Imm = 0x00; break; // GT -> LT
case 0x03: Imm = 0x01; break; // GE -> LE
case 0x04: // EQ
case 0x05: // NE
case 0x06: // FALSE
case 0x07: // TRUE
break;
}
return Imm;
}
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
if (!MI.isTerminator()) return false;
// Conditional branch is a special case.
if (MI.isBranch() && !MI.isBarrier())
return true;
if (!MI.isPredicable())
return true;
return !isPredicated(MI);
}
bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
case X86::TCRETURNdi:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
case X86::TCRETURNri64:
case X86::TCRETURNmi64:
return true;
default:
return false;
}
}
bool X86InstrInfo::canMakeTailCallConditional(
SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const {
if (TailCall.getOpcode() != X86::TCRETURNdi &&
TailCall.getOpcode() != X86::TCRETURNdi64) {
// Only direct calls can be done with a conditional branch.
return false;
}
const MachineFunction *MF = TailCall.getParent()->getParent();
if (Subtarget.isTargetWin64() && MF->hasWinCFI()) {
// Conditional tail calls confuse the Win64 unwinder.
return false;
}
assert(BranchCond.size() == 1);
if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
// Can't make a conditional tail call with this condition.
return false;
}
const X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
if (X86FI->getTCReturnAddrDelta() != 0 ||
TailCall.getOperand(1).getImm() != 0) {
// A conditional tail call cannot do any stack adjustment.
return false;
}
return true;
}
void X86InstrInfo::replaceBranchWithTailCall(
MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const {
assert(canMakeTailCallConditional(BranchCond, TailCall));
MachineBasicBlock::iterator I = MBB.end();
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
if (!I->isBranch())
llvm_unreachable("Can't find the branch to replace!");
X86::CondCode CC = X86::getCondFromBranchOpc(I->getOpcode());
assert(BranchCond.size() == 1);
if (CC != BranchCond[0].getImm())
continue;
break;
}
unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
: X86::TCRETURNdi64cc;
auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
MIB->addOperand(TailCall.getOperand(0)); // Destination.
MIB.addImm(0); // Stack offset (not used).
MIB->addOperand(BranchCond[0]); // Condition.
MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
// Add implicit uses and defs of all live regs potentially clobbered by the
// call. This way they still appear live across the call.
LivePhysRegs LiveRegs(getRegisterInfo());
LiveRegs.addLiveOuts(MBB);
SmallVector<std::pair<unsigned, const MachineOperand *>, 8> Clobbers;
LiveRegs.stepForward(*MIB, Clobbers);
for (const auto &C : Clobbers) {
MIB.addReg(C.first, RegState::Implicit);
MIB.addReg(C.first, RegState::Implicit | RegState::Define);
}
I->eraseFromParent();
}
// Given an MBB and its TBB, find the FBB which was a fallthrough MBB (it may
// not be a fallthrough MBB now due to layout changes). Return nullptr if the
// fallthrough MBB cannot be identified.
static MachineBasicBlock *getFallThroughMBB(MachineBasicBlock *MBB,
MachineBasicBlock *TBB) {
// Look for non-EHPad successors other than TBB. If we find exactly one, it
// is the fallthrough MBB. If we find zero, then TBB is both the target MBB
// and fallthrough MBB. If we find more than one, we cannot identify the
// fallthrough MBB and should return nullptr.
MachineBasicBlock *FallthroughBB = nullptr;
for (auto SI = MBB->succ_begin(), SE = MBB->succ_end(); SI != SE; ++SI) {
if ((*SI)->isEHPad() || (*SI == TBB && FallthroughBB))
continue;
// Return a nullptr if we found more than one fallthrough successor.
if (FallthroughBB && FallthroughBB != TBB)
return nullptr;
FallthroughBB = *SI;
}
return FallthroughBB;
}
bool X86InstrInfo::AnalyzeBranchImpl(
MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
SmallVectorImpl<MachineInstr *> &CondBranches, bool AllowModify) const {
// Start from the bottom of the block and work up, examining the
// terminator instructions.
MachineBasicBlock::iterator I = MBB.end();
MachineBasicBlock::iterator UnCondBrIter = MBB.end();
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
// Working from the bottom, when we see a non-terminator instruction, we're
// done.
if (!isUnpredicatedTerminator(*I))
break;
// A terminator that isn't a branch can't easily be handled by this
// analysis.
if (!I->isBranch())
return true;
// Handle unconditional branches.
if (I->getOpcode() == X86::JMP_1) {
UnCondBrIter = I;
if (!AllowModify) {
TBB = I->getOperand(0).getMBB();
continue;
}
// If the block has any instructions after a JMP, delete them.
while (std::next(I) != MBB.end())
std::next(I)->eraseFromParent();
Cond.clear();
FBB = nullptr;
// Delete the JMP if it's equivalent to a fall-through.
if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
TBB = nullptr;
I->eraseFromParent();
I = MBB.end();
UnCondBrIter = MBB.end();
continue;
}
// TBB is used to indicate the unconditional destination.
TBB = I->getOperand(0).getMBB();
continue;
}
// Handle conditional branches.
X86::CondCode BranchCode = X86::getCondFromBranchOpc(I->getOpcode());
if (BranchCode == X86::COND_INVALID)
return true; // Can't handle indirect branch.
// Working from the bottom, handle the first conditional branch.
if (Cond.empty()) {
MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
if (AllowModify && UnCondBrIter != MBB.end() &&
MBB.isLayoutSuccessor(TargetBB)) {
// If we can modify the code and it ends in something like:
//
// jCC L1
// jmp L2
// L1:
// ...
// L2:
//
// Then we can change this to:
//
// jnCC L2
// L1:
// ...
// L2:
//
// Which is a bit more efficient.
// We conditionally jump to the fall-through block.
BranchCode = GetOppositeBranchCondition(BranchCode);
unsigned JNCC = GetCondBranchFromCond(BranchCode);
MachineBasicBlock::iterator OldInst = I;
BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
.addMBB(UnCondBrIter->getOperand(0).getMBB());
BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_1))
.addMBB(TargetBB);
OldInst->eraseFromParent();
UnCondBrIter->eraseFromParent();
// Restart the analysis.
UnCondBrIter = MBB.end();
I = MBB.end();
continue;
}
FBB = TBB;
TBB = I->getOperand(0).getMBB();
Cond.push_back(MachineOperand::CreateImm(BranchCode));
CondBranches.push_back(&*I);
continue;
}
// Handle subsequent conditional branches. Only handle the case where all
// conditional branches branch to the same destination and their condition
// opcodes fit one of the special multi-branch idioms.
assert(Cond.size() == 1);
assert(TBB);
// If the conditions are the same, we can leave them alone.
X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
auto NewTBB = I->getOperand(0).getMBB();
if (OldBranchCode == BranchCode && TBB == NewTBB)
continue;
// If they differ, see if they fit one of the known patterns. Theoretically,
// we could handle more patterns here, but we shouldn't expect to see them
// if instruction selection has done a reasonable job.
if (TBB == NewTBB &&
((OldBranchCode == X86::COND_P && BranchCode == X86::COND_NE) ||
(OldBranchCode == X86::COND_NE && BranchCode == X86::COND_P))) {
BranchCode = X86::COND_NE_OR_P;
} else if ((OldBranchCode == X86::COND_NP && BranchCode == X86::COND_NE) ||
(OldBranchCode == X86::COND_E && BranchCode == X86::COND_P)) {
if (NewTBB != (FBB ? FBB : getFallThroughMBB(&MBB, TBB)))
return true;
// X86::COND_E_AND_NP usually has two different branch destinations.
//
// JP B1
// JE B2
// JMP B1
// B1:
// B2:
//
// Here this condition branches to B2 only if NP && E. It has another
// equivalent form:
//
// JNE B1
// JNP B2
// JMP B1
// B1:
// B2:
//
// Similarly it branches to B2 only if E && NP. That is why this condition
// is named COND_E_AND_NP.
BranchCode = X86::COND_E_AND_NP;
} else
return true;
// Update the MachineOperand.
Cond[0].setImm(BranchCode);
CondBranches.push_back(&*I);
}
return false;
}
bool X86InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const {
SmallVector<MachineInstr *, 4> CondBranches;
return AnalyzeBranchImpl(MBB, TBB, FBB, Cond, CondBranches, AllowModify);
}
bool X86InstrInfo::analyzeBranchPredicate(MachineBasicBlock &MBB,
MachineBranchPredicate &MBP,
bool AllowModify) const {
using namespace std::placeholders;
SmallVector<MachineOperand, 4> Cond;
SmallVector<MachineInstr *, 4> CondBranches;
if (AnalyzeBranchImpl(MBB, MBP.TrueDest, MBP.FalseDest, Cond, CondBranches,
AllowModify))
return true;
if (Cond.size() != 1)
return true;
assert(MBP.TrueDest && "expected!");
if (!MBP.FalseDest)
MBP.FalseDest = MBB.getNextNode();
const TargetRegisterInfo *TRI = &getRegisterInfo();
MachineInstr *ConditionDef = nullptr;
bool SingleUseCondition = true;
for (auto I = std::next(MBB.rbegin()), E = MBB.rend(); I != E; ++I) {
if (I->modifiesRegister(X86::EFLAGS, TRI)) {
ConditionDef = &*I;
break;
}
if (I->readsRegister(X86::EFLAGS, TRI))
SingleUseCondition = false;
}
if (!ConditionDef)
return true;
if (SingleUseCondition) {
for (auto *Succ : MBB.successors())
if (Succ->isLiveIn(X86::EFLAGS))
SingleUseCondition = false;
}
MBP.ConditionDef = ConditionDef;
MBP.SingleUseCondition = SingleUseCondition;
// Currently we only recognize the simple pattern:
//
// test %reg, %reg
// je %label
//
const unsigned TestOpcode =
Subtarget.is64Bit() ? X86::TEST64rr : X86::TEST32rr;
if (ConditionDef->getOpcode() == TestOpcode &&
ConditionDef->getNumOperands() == 3 &&
ConditionDef->getOperand(0).isIdenticalTo(ConditionDef->getOperand(1)) &&
(Cond[0].getImm() == X86::COND_NE || Cond[0].getImm() == X86::COND_E)) {
MBP.LHS = ConditionDef->getOperand(0);
MBP.RHS = MachineOperand::CreateImm(0);
MBP.Predicate = Cond[0].getImm() == X86::COND_NE
? MachineBranchPredicate::PRED_NE
: MachineBranchPredicate::PRED_EQ;
return false;
}
return true;
}
unsigned X86InstrInfo::removeBranch(MachineBasicBlock &MBB,
int *BytesRemoved) const {
assert(!BytesRemoved && "code size not handled");
MachineBasicBlock::iterator I = MBB.end();
unsigned Count = 0;
while (I != MBB.begin()) {
--I;
if (I->isDebugInstr())
continue;
if (I->getOpcode() != X86::JMP_1 &&
X86::getCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
break;
// Remove the branch.
I->eraseFromParent();
I = MBB.end();
++Count;
}
return Count;
}
unsigned X86InstrInfo::insertBranch(MachineBasicBlock &MBB,
MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
ArrayRef<MachineOperand> Cond,
const DebugLoc &DL,
int *BytesAdded) const {
// Shouldn't be a fall through.
assert(TBB && "insertBranch must not be told to insert a fallthrough");
assert((Cond.size() == 1 || Cond.size() == 0) &&
"X86 branch conditions have one component!");
assert(!BytesAdded && "code size not handled");
if (Cond.empty()) {
// Unconditional branch?
assert(!FBB && "Unconditional branch with multiple successors!");
BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(TBB);
return 1;
}
// If FBB is null, it is implied to be a fall-through block.
bool FallThru = FBB == nullptr;
// Conditional branch.
unsigned Count = 0;
X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
switch (CC) {
case X86::COND_NE_OR_P:
// Synthesize NE_OR_P with two branches.
BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(TBB);
++Count;
BuildMI(&MBB, DL, get(X86::JP_1)).addMBB(TBB);
++Count;
break;
case X86::COND_E_AND_NP:
// Use the next block of MBB as FBB if it is null.
if (FBB == nullptr) {
FBB = getFallThroughMBB(&MBB, TBB);
assert(FBB && "MBB cannot be the last block in function when the false "
"body is a fall-through.");
}
// Synthesize COND_E_AND_NP with two branches.
BuildMI(&MBB, DL, get(X86::JNE_1)).addMBB(FBB);
++Count;
BuildMI(&MBB, DL, get(X86::JNP_1)).addMBB(TBB);
++Count;
break;
default: {
unsigned Opc = GetCondBranchFromCond(CC);
BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
++Count;
}
}
if (!FallThru) {
// Two-way Conditional branch. Insert the second branch.
BuildMI(&MBB, DL, get(X86::JMP_1)).addMBB(FBB);
++Count;
}
return Count;
}
bool X86InstrInfo::
canInsertSelect(const MachineBasicBlock &MBB,
ArrayRef<MachineOperand> Cond,
unsigned TrueReg, unsigned FalseReg,
int &CondCycles, int &TrueCycles, int &FalseCycles) const {
// Not all subtargets have cmov instructions.
if (!Subtarget.hasCMov())
return false;
if (Cond.size() != 1)
return false;
// We cannot do the composite conditions, at least not in SSA form.
if ((X86::CondCode)Cond[0].getImm() > X86::COND_S)
return false;
// Check register classes.
const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *RC =
RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
if (!RC)
return false;
// We have cmov instructions for 16-, 32-, and 64-bit general-purpose registers.
if (X86::GR16RegClass.hasSubClassEq(RC) ||
X86::GR32RegClass.hasSubClassEq(RC) ||
X86::GR64RegClass.hasSubClassEq(RC)) {
// This latency applies to Pentium M, Merom, Wolfdale, Nehalem, and Sandy
// Bridge. Probably Ivy Bridge as well.
CondCycles = 2;
TrueCycles = 2;
FalseCycles = 2;
return true;
}
// Can't do vectors.
return false;
}
void X86InstrInfo::insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator I,
const DebugLoc &DL, unsigned DstReg,
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
assert(Cond.size() == 1 && "Invalid Cond array");
unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
TRI.getRegSizeInBits(RC) / 8,
false /*HasMemoryOperand*/);
BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
}
/// Test if the given register is a physical h register.
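/// On x86 these are AH, BH, CH and DH; they are only encodable without a
/// REX prefix, which is why copies and spills involving them need the
/// NOREX move variants used below.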
static bool isHReg(unsigned Reg) {
return X86::GR8_ABCD_HRegClass.contains(Reg);
}
// Try to copy between VR128/VR64 and GR64 registers.
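// Returns the copy opcode to use, or 0 if no single instruction can perform
// this cross-class copy.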
static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg,
const X86Subtarget &Subtarget) {
bool HasAVX = Subtarget.hasAVX();
bool HasAVX512 = Subtarget.hasAVX512();
// SrcReg(MaskReg) -> DestReg(GR64)
// SrcReg(MaskReg) -> DestReg(GR32)
// All KMASK RegClasses hold the same k registers, so any one of them can be tested against.
if (X86::VK16RegClass.contains(SrcReg)) {
if (X86::GR64RegClass.contains(DestReg)) {
assert(Subtarget.hasBWI());
return X86::KMOVQrk;
}
if (X86::GR32RegClass.contains(DestReg))
return Subtarget.hasBWI() ? X86::KMOVDrk : X86::KMOVWrk;
}
// SrcReg(GR64) -> DestReg(MaskReg)
// SrcReg(GR32) -> DestReg(MaskReg)
// All KMASK RegClasses hold the same k registers, so any one of them can be tested against.
if (X86::VK16RegClass.contains(DestReg)) {
if (X86::GR64RegClass.contains(SrcReg)) {
assert(Subtarget.hasBWI());
return X86::KMOVQkr;
}
if (X86::GR32RegClass.contains(SrcReg))
return Subtarget.hasBWI() ? X86::KMOVDkr : X86::KMOVWkr;
}
// SrcReg(VR128) -> DestReg(GR64)
// SrcReg(VR64) -> DestReg(GR64)
// SrcReg(GR64) -> DestReg(VR128)
// SrcReg(GR64) -> DestReg(VR64)
if (X86::GR64RegClass.contains(DestReg)) {
if (X86::VR128XRegClass.contains(SrcReg))
// Copy from a VR128 register to a GR64 register.
return HasAVX512 ? X86::VMOVPQIto64Zrr :
HasAVX ? X86::VMOVPQIto64rr :
X86::MOVPQIto64rr;
if (X86::VR64RegClass.contains(SrcReg))
// Copy from a VR64 register to a GR64 register.
return X86::MMX_MOVD64from64rr;
} else if (X86::GR64RegClass.contains(SrcReg)) {
// Copy from a GR64 register to a VR128 register.
if (X86::VR128XRegClass.contains(DestReg))
return HasAVX512 ? X86::VMOV64toPQIZrr :
HasAVX ? X86::VMOV64toPQIrr :
X86::MOV64toPQIrr;
// Copy from a GR64 register to a VR64 register.
if (X86::VR64RegClass.contains(DestReg))
return X86::MMX_MOVD64to64rr;
}
// SrcReg(FR32) -> DestReg(GR32)
// SrcReg(GR32) -> DestReg(FR32)
if (X86::GR32RegClass.contains(DestReg) &&
X86::FR32XRegClass.contains(SrcReg))
// Copy from a FR32 register to a GR32 register.
return HasAVX512 ? X86::VMOVSS2DIZrr :
HasAVX ? X86::VMOVSS2DIrr :
X86::MOVSS2DIrr;
if (X86::FR32XRegClass.contains(DestReg) &&
X86::GR32RegClass.contains(SrcReg))
// Copy from a GR32 register to a FR32 register.
return HasAVX512 ? X86::VMOVDI2SSZrr :
HasAVX ? X86::VMOVDI2SSrr :
X86::MOVDI2SSrr;
return 0;
}
void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const DebugLoc &DL, unsigned DestReg,
unsigned SrcReg, bool KillSrc) const {
// First deal with the normal symmetric copies.
bool HasAVX = Subtarget.hasAVX();
bool HasVLX = Subtarget.hasVLX();
unsigned Opc = 0;
if (X86::GR64RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV64rr;
else if (X86::GR32RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV32rr;
else if (X86::GR16RegClass.contains(DestReg, SrcReg))
Opc = X86::MOV16rr;
else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
// Copying to or from a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
if ((isHReg(DestReg) || isHReg(SrcReg)) &&
Subtarget.is64Bit()) {
Opc = X86::MOV8rr_NOREX;
// Both operands must be encodable without a REX prefix.
assert(X86::GR8_NOREXRegClass.contains(SrcReg, DestReg) &&
"8-bit H register can not be copied outside GR8_NOREX");
} else
Opc = X86::MOV8rr;
}
else if (X86::VR64RegClass.contains(DestReg, SrcReg))
Opc = X86::MMX_MOVQ64rr;
else if (X86::VR128XRegClass.contains(DestReg, SrcReg)) {
if (HasVLX)
Opc = X86::VMOVAPSZ128rr;
else if (X86::VR128RegClass.contains(DestReg, SrcReg))
Opc = HasAVX ? X86::VMOVAPSrr : X86::MOVAPSrr;
else {
// If this is an extended register and we don't have VLX, we need to use a
// 512-bit move.
Opc = X86::VMOVAPSZrr;
const TargetRegisterInfo *TRI = &getRegisterInfo();
DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_xmm,
&X86::VR512RegClass);
SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm,
&X86::VR512RegClass);
}
} else if (X86::VR256XRegClass.contains(DestReg, SrcReg)) {
if (HasVLX)
Opc = X86::VMOVAPSZ256rr;
else if (X86::VR256RegClass.contains(DestReg, SrcReg))
Opc = X86::VMOVAPSYrr;
else {
// If this is an extended register and we don't have VLX, we need to use a
// 512-bit move.
Opc = X86::VMOVAPSZrr;
const TargetRegisterInfo *TRI = &getRegisterInfo();
DestReg = TRI->getMatchingSuperReg(DestReg, X86::sub_ymm,
&X86::VR512RegClass);
SrcReg = TRI->getMatchingSuperReg(SrcReg, X86::sub_ymm,
&X86::VR512RegClass);
}
} else if (X86::VR512RegClass.contains(DestReg, SrcReg))
Opc = X86::VMOVAPSZrr;
// All KMASK RegClasses hold the same k registers, so any one of them can be tested against.
else if (X86::VK16RegClass.contains(DestReg, SrcReg))
Opc = Subtarget.hasBWI() ? X86::KMOVQkk : X86::KMOVWkk;
if (!Opc)
Opc = CopyToFromAsymmetricReg(DestReg, SrcReg, Subtarget);
if (Opc) {
BuildMI(MBB, MI, DL, get(Opc), DestReg)
.addReg(SrcReg, getKillRegState(KillSrc));
return;
}
if (SrcReg == X86::EFLAGS || DestReg == X86::EFLAGS) {
// FIXME: We use a fatal error here because historically LLVM has tried to
// lower some of these physreg copies and we want to ensure we get
// reasonable bug reports if someone encounters a case no other testing
// found. This path should be removed after the LLVM 7 release.
report_fatal_error("Unable to copy EFLAGS physical register!");
}
LLVM_DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg) << " to "
<< RI.getName(DestReg) << '\n');
- llvm_unreachable("Cannot emit physreg copy instruction");
+ report_fatal_error("Cannot emit physreg copy instruction");
}
bool X86InstrInfo::isCopyInstr(const MachineInstr &MI,
const MachineOperand *&Src,
const MachineOperand *&Dest) const {
if (MI.isMoveReg()) {
Dest = &MI.getOperand(0);
Src = &MI.getOperand(1);
return true;
}
return false;
}
static unsigned getLoadStoreRegOpcode(unsigned Reg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI,
bool load) {
bool HasAVX = STI.hasAVX();
bool HasAVX512 = STI.hasAVX512();
bool HasVLX = STI.hasVLX();
switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
default:
llvm_unreachable("Unknown spill size");
case 1:
assert(X86::GR8RegClass.hasSubClassEq(RC) && "Unknown 1-byte regclass");
if (STI.is64Bit())
// Loading or storing a physical H register on x86-64 requires a NOREX
// move. Otherwise use a normal move.
if (isHReg(Reg) || X86::GR8_ABCD_HRegClass.hasSubClassEq(RC))
return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
return load ? X86::MOV8rm : X86::MOV8mr;
case 2:
if (X86::VK16RegClass.hasSubClassEq(RC))
return load ? X86::KMOVWkm : X86::KMOVWmk;
assert(X86::GR16RegClass.hasSubClassEq(RC) && "Unknown 2-byte regclass");
return load ? X86::MOV16rm : X86::MOV16mr;
case 4:
if (X86::GR32RegClass.hasSubClassEq(RC))
return load ? X86::MOV32rm : X86::MOV32mr;
if (X86::FR32XRegClass.hasSubClassEq(RC))
return load ?
(HasAVX512 ? X86::VMOVSSZrm : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm) :
(HasAVX512 ? X86::VMOVSSZmr : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
if (X86::RFP32RegClass.hasSubClassEq(RC))
return load ? X86::LD_Fp32m : X86::ST_Fp32m;
if (X86::VK32RegClass.hasSubClassEq(RC)) {
assert(STI.hasBWI() && "KMOVD requires BWI");
return load ? X86::KMOVDkm : X86::KMOVDmk;
}
llvm_unreachable("Unknown 4-byte regclass");
case 8:
if (X86::GR64RegClass.hasSubClassEq(RC))
return load ? X86::MOV64rm : X86::MOV64mr;
if (X86::FR64XRegClass.hasSubClassEq(RC))
return load ?
(HasAVX512 ? X86::VMOVSDZrm : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm) :
(HasAVX512 ? X86::VMOVSDZmr : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
if (X86::VR64RegClass.hasSubClassEq(RC))
return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
if (X86::RFP64RegClass.hasSubClassEq(RC))
return load ? X86::LD_Fp64m : X86::ST_Fp64m;
if (X86::VK64RegClass.hasSubClassEq(RC)) {
assert(STI.hasBWI() && "KMOVQ requires BWI");
return load ? X86::KMOVQkm : X86::KMOVQmk;
}
llvm_unreachable("Unknown 8-byte regclass");
case 10:
assert(X86::RFP80RegClass.hasSubClassEq(RC) && "Unknown 10-byte regclass");
return load ? X86::LD_Fp80m : X86::ST_FpP80m;
case 16: {
if (X86::VR128XRegClass.hasSubClassEq(RC)) {
// If stack is realigned we can use aligned stores.
if (isStackAligned)
return load ?
(HasVLX ? X86::VMOVAPSZ128rm :
HasAVX512 ? X86::VMOVAPSZ128rm_NOVLX :
HasAVX ? X86::VMOVAPSrm :
X86::MOVAPSrm):
(HasVLX ? X86::VMOVAPSZ128mr :
HasAVX512 ? X86::VMOVAPSZ128mr_NOVLX :
HasAVX ? X86::VMOVAPSmr :
X86::MOVAPSmr);
else
return load ?
(HasVLX ? X86::VMOVUPSZ128rm :
HasAVX512 ? X86::VMOVUPSZ128rm_NOVLX :
HasAVX ? X86::VMOVUPSrm :
X86::MOVUPSrm):
(HasVLX ? X86::VMOVUPSZ128mr :
HasAVX512 ? X86::VMOVUPSZ128mr_NOVLX :
HasAVX ? X86::VMOVUPSmr :
X86::MOVUPSmr);
}
if (X86::BNDRRegClass.hasSubClassEq(RC)) {
if (STI.is64Bit())
return load ? X86::BNDMOV64rm : X86::BNDMOV64mr;
else
return load ? X86::BNDMOV32rm : X86::BNDMOV32mr;
}
llvm_unreachable("Unknown 16-byte regclass");
}
case 32:
assert(X86::VR256XRegClass.hasSubClassEq(RC) && "Unknown 32-byte regclass");
// If stack is realigned we can use aligned stores.
if (isStackAligned)
return load ?
(HasVLX ? X86::VMOVAPSZ256rm :
HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX :
X86::VMOVAPSYrm) :
(HasVLX ? X86::VMOVAPSZ256mr :
HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX :
X86::VMOVAPSYmr);
else
return load ?
(HasVLX ? X86::VMOVUPSZ256rm :
HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX :
X86::VMOVUPSYrm) :
(HasVLX ? X86::VMOVUPSZ256mr :
HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX :
X86::VMOVUPSYmr);
case 64:
assert(X86::VR512RegClass.hasSubClassEq(RC) && "Unknown 64-byte regclass");
assert(STI.hasAVX512() && "Using 512-bit register requires AVX512");
if (isStackAligned)
return load ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
else
return load ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
}
}
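// Note: getMemOpBaseRegImmOfs below only decomposes simple base + displacement
// memory operands; e.g. 'movl 16(%rdi), %eax' yields BaseReg = %rdi and
// Offset = 16, while scaled or indexed forms such as '(%rdi,%rcx,4)' are
// rejected.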
bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
int64_t &Offset,
const TargetRegisterInfo *TRI) const {
const MCInstrDesc &Desc = MemOp.getDesc();
int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
if (MemRefBegin < 0)
return false;
MemRefBegin += X86II::getOperandBias(Desc);
MachineOperand &BaseMO = MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
if (!BaseMO.isReg()) // Can be an MO_FrameIndex
return false;
BaseReg = BaseMO.getReg();
if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
return false;
if (MemOp.getOperand(MemRefBegin + X86::AddrIndexReg).getReg() !=
X86::NoRegister)
return false;
const MachineOperand &DispMO = MemOp.getOperand(MemRefBegin + X86::AddrDisp);
// The displacement can be symbolic; bail out if it is not an immediate.
if (!DispMO.isImm())
return false;
Offset = DispMO.getImm();
return true;
}
static unsigned getStoreRegOpcode(unsigned SrcReg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI) {
return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, STI, false);
}
static unsigned getLoadRegOpcode(unsigned DestReg,
const TargetRegisterClass *RC,
bool isStackAligned,
const X86Subtarget &STI) {
return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, STI, true);
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
"Stack slot too small for store");
unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
.addReg(SrcReg, getKillRegState(isKill));
}
void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
MIB.addReg(SrcReg, getKillRegState(isKill));
(*MIB).setMemRefs(MMOBegin, MMOEnd);
NewMIs.push_back(MIB);
}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIdx,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL = MBB.findDebugLoc(MI);
addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
}
void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
const TargetRegisterClass *RC,
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
DebugLoc DL;
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
MIB.add(Addr[i]);
(*MIB).setMemRefs(MMOBegin, MMOEnd);
NewMIs.push_back(MIB);
}
bool X86InstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const {
switch (MI.getOpcode()) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
case X86::CMP32ri:
case X86::CMP32ri8:
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri:
SrcReg = MI.getOperand(0).getReg();
SrcReg2 = 0;
if (MI.getOperand(1).isImm()) {
CmpMask = ~0;
CmpValue = MI.getOperand(1).getImm();
} else {
CmpMask = CmpValue = 0;
}
return true;
// A SUB can be used to perform a comparison.
case X86::SUB64rm:
case X86::SUB32rm:
case X86::SUB16rm:
case X86::SUB8rm:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
CmpMask = 0;
CmpValue = 0;
return true;
case X86::SUB64rr:
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = MI.getOperand(2).getReg();
CmpMask = 0;
CmpValue = 0;
return true;
case X86::SUB64ri32:
case X86::SUB64ri8:
case X86::SUB32ri:
case X86::SUB32ri8:
case X86::SUB16ri:
case X86::SUB16ri8:
case X86::SUB8ri:
SrcReg = MI.getOperand(1).getReg();
SrcReg2 = 0;
if (MI.getOperand(2).isImm()) {
CmpMask = ~0;
CmpValue = MI.getOperand(2).getImm();
} else {
CmpMask = CmpValue = 0;
}
return true;
case X86::CMP64rr:
case X86::CMP32rr:
case X86::CMP16rr:
case X86::CMP8rr:
SrcReg = MI.getOperand(0).getReg();
SrcReg2 = MI.getOperand(1).getReg();
CmpMask = 0;
CmpValue = 0;
return true;
case X86::TEST8rr:
case X86::TEST16rr:
case X86::TEST32rr:
case X86::TEST64rr:
SrcReg = MI.getOperand(0).getReg();
if (MI.getOperand(1).getReg() != SrcReg)
return false;
// Compare against zero.
SrcReg2 = 0;
CmpMask = ~0;
CmpValue = 0;
return true;
}
return false;
}
/// Check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// This function can be extended later on.
/// SrcReg, SrcReg2: register operands for FlagI.
/// ImmValue: immediate for FlagI if it takes an immediate.
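/// For example, 'cmpl %esi, %edi' computes the same EFLAGS as
/// 'subl %esi, %edi'; only the write of the result register differs, so a
/// preceding SUB with matching operands makes the CMP redundant.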
inline static bool isRedundantFlagInstr(MachineInstr &FlagI, unsigned SrcReg,
unsigned SrcReg2, int ImmMask,
int ImmValue, MachineInstr &OI) {
if (((FlagI.getOpcode() == X86::CMP64rr && OI.getOpcode() == X86::SUB64rr) ||
(FlagI.getOpcode() == X86::CMP32rr && OI.getOpcode() == X86::SUB32rr) ||
(FlagI.getOpcode() == X86::CMP16rr && OI.getOpcode() == X86::SUB16rr) ||
(FlagI.getOpcode() == X86::CMP8rr && OI.getOpcode() == X86::SUB8rr)) &&
((OI.getOperand(1).getReg() == SrcReg &&
OI.getOperand(2).getReg() == SrcReg2) ||
(OI.getOperand(1).getReg() == SrcReg2 &&
OI.getOperand(2).getReg() == SrcReg)))
return true;
if (ImmMask != 0 &&
((FlagI.getOpcode() == X86::CMP64ri32 &&
OI.getOpcode() == X86::SUB64ri32) ||
(FlagI.getOpcode() == X86::CMP64ri8 &&
OI.getOpcode() == X86::SUB64ri8) ||
(FlagI.getOpcode() == X86::CMP32ri && OI.getOpcode() == X86::SUB32ri) ||
(FlagI.getOpcode() == X86::CMP32ri8 &&
OI.getOpcode() == X86::SUB32ri8) ||
(FlagI.getOpcode() == X86::CMP16ri && OI.getOpcode() == X86::SUB16ri) ||
(FlagI.getOpcode() == X86::CMP16ri8 &&
OI.getOpcode() == X86::SUB16ri8) ||
(FlagI.getOpcode() == X86::CMP8ri && OI.getOpcode() == X86::SUB8ri)) &&
OI.getOperand(1).getReg() == SrcReg &&
OI.getOperand(2).getImm() == ImmValue)
return true;
return false;
}
/// Check whether the definition can be converted
/// to remove a comparison against zero.
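/// For example, the SUB in 'subl %esi, %edi ; testl %edi, %edi ; je' already
/// sets ZF according to the result, so the TEST is unnecessary.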
inline static bool isDefConvertible(MachineInstr &MI) {
switch (MI.getOpcode()) {
default: return false;
// The shift instructions only modify ZF if their shift count is non-zero.
// N.B.: The processor truncates the shift count depending on the encoding.
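// (The count is masked to 5 bits for 8/16/32-bit operands and to 6 bits for
// 64-bit operands, so e.g. 'shrl $32, %eax' shifts by zero and leaves the
// flags untouched.)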
case X86::SAR8ri: case X86::SAR16ri: case X86::SAR32ri:case X86::SAR64ri:
case X86::SHR8ri: case X86::SHR16ri: case X86::SHR32ri:case X86::SHR64ri:
return getTruncatedShiftCount(MI, 2) != 0;
// Some left shift instructions can be turned into LEA instructions but only
// if their flags aren't used. Avoid transforming such instructions.
case X86::SHL8ri: case X86::SHL16ri: case X86::SHL32ri:case X86::SHL64ri:{
unsigned ShAmt = getTruncatedShiftCount(MI, 2);
if (isTruncatedShiftCountForLEA(ShAmt)) return false;
return ShAmt != 0;
}
case X86::SHRD16rri8:case X86::SHRD32rri8:case X86::SHRD64rri8:
case X86::SHLD16rri8:case X86::SHLD32rri8:case X86::SHLD64rri8:
return getTruncatedShiftCount(MI, 3) != 0;
case X86::SUB64ri32: case X86::SUB64ri8: case X86::SUB32ri:
case X86::SUB32ri8: case X86::SUB16ri: case X86::SUB16ri8:
case X86::SUB8ri: case X86::SUB64rr: case X86::SUB32rr:
case X86::SUB16rr: case X86::SUB8rr: case X86::SUB64rm:
case X86::SUB32rm: case X86::SUB16rm: case X86::SUB8rm:
case X86::DEC64r: case X86::DEC32r: case X86::DEC16r: case X86::DEC8r:
case X86::ADD64ri32: case X86::ADD64ri8: case X86::ADD32ri:
case X86::ADD32ri8: case X86::ADD16ri: case X86::ADD16ri8:
case X86::ADD8ri: case X86::ADD64rr: case X86::ADD32rr:
case X86::ADD16rr: case X86::ADD8rr: case X86::ADD64rm:
case X86::ADD32rm: case X86::ADD16rm: case X86::ADD8rm:
case X86::INC64r: case X86::INC32r: case X86::INC16r: case X86::INC8r:
case X86::AND64ri32: case X86::AND64ri8: case X86::AND32ri:
case X86::AND32ri8: case X86::AND16ri: case X86::AND16ri8:
case X86::AND8ri: case X86::AND64rr: case X86::AND32rr:
case X86::AND16rr: case X86::AND8rr: case X86::AND64rm:
case X86::AND32rm: case X86::AND16rm: case X86::AND8rm:
case X86::XOR64ri32: case X86::XOR64ri8: case X86::XOR32ri:
case X86::XOR32ri8: case X86::XOR16ri: case X86::XOR16ri8:
case X86::XOR8ri: case X86::XOR64rr: case X86::XOR32rr:
case X86::XOR16rr: case X86::XOR8rr: case X86::XOR64rm:
case X86::XOR32rm: case X86::XOR16rm: case X86::XOR8rm:
case X86::OR64ri32: case X86::OR64ri8: case X86::OR32ri:
case X86::OR32ri8: case X86::OR16ri: case X86::OR16ri8:
case X86::OR8ri: case X86::OR64rr: case X86::OR32rr:
case X86::OR16rr: case X86::OR8rr: case X86::OR64rm:
case X86::OR32rm: case X86::OR16rm: case X86::OR8rm:
case X86::ADC64ri32: case X86::ADC64ri8: case X86::ADC32ri:
case X86::ADC32ri8: case X86::ADC16ri: case X86::ADC16ri8:
case X86::ADC8ri: case X86::ADC64rr: case X86::ADC32rr:
case X86::ADC16rr: case X86::ADC8rr: case X86::ADC64rm:
case X86::ADC32rm: case X86::ADC16rm: case X86::ADC8rm:
case X86::SBB64ri32: case X86::SBB64ri8: case X86::SBB32ri:
case X86::SBB32ri8: case X86::SBB16ri: case X86::SBB16ri8:
case X86::SBB8ri: case X86::SBB64rr: case X86::SBB32rr:
case X86::SBB16rr: case X86::SBB8rr: case X86::SBB64rm:
case X86::SBB32rm: case X86::SBB16rm: case X86::SBB8rm:
case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:
case X86::SAR8r1: case X86::SAR16r1: case X86::SAR32r1:case X86::SAR64r1:
case X86::SHR8r1: case X86::SHR16r1: case X86::SHR32r1:case X86::SHR64r1:
case X86::SHL8r1: case X86::SHL16r1: case X86::SHL32r1:case X86::SHL64r1:
case X86::ANDN32rr: case X86::ANDN32rm:
case X86::ANDN64rr: case X86::ANDN64rm:
case X86::BEXTR32rr: case X86::BEXTR64rr:
case X86::BEXTR32rm: case X86::BEXTR64rm:
case X86::BLSI32rr: case X86::BLSI32rm:
case X86::BLSI64rr: case X86::BLSI64rm:
case X86::BLSMSK32rr:case X86::BLSMSK32rm:
case X86::BLSMSK64rr:case X86::BLSMSK64rm:
case X86::BLSR32rr: case X86::BLSR32rm:
case X86::BLSR64rr: case X86::BLSR64rm:
case X86::BZHI32rr: case X86::BZHI32rm:
case X86::BZHI64rr: case X86::BZHI64rm:
case X86::LZCNT16rr: case X86::LZCNT16rm:
case X86::LZCNT32rr: case X86::LZCNT32rm:
case X86::LZCNT64rr: case X86::LZCNT64rm:
case X86::POPCNT16rr:case X86::POPCNT16rm:
case X86::POPCNT32rr:case X86::POPCNT32rm:
case X86::POPCNT64rr:case X86::POPCNT64rm:
case X86::TZCNT16rr: case X86::TZCNT16rm:
case X86::TZCNT32rr: case X86::TZCNT32rm:
case X86::TZCNT64rr: case X86::TZCNT64rm:
case X86::BEXTRI32ri: case X86::BEXTRI32mi:
case X86::BEXTRI64ri: case X86::BEXTRI64mi:
case X86::BLCFILL32rr: case X86::BLCFILL32rm:
case X86::BLCFILL64rr: case X86::BLCFILL64rm:
case X86::BLCI32rr: case X86::BLCI32rm:
case X86::BLCI64rr: case X86::BLCI64rm:
case X86::BLCIC32rr: case X86::BLCIC32rm:
case X86::BLCIC64rr: case X86::BLCIC64rm:
case X86::BLCMSK32rr: case X86::BLCMSK32rm:
case X86::BLCMSK64rr: case X86::BLCMSK64rm:
case X86::BLCS32rr: case X86::BLCS32rm:
case X86::BLCS64rr: case X86::BLCS64rm:
case X86::BLSFILL32rr: case X86::BLSFILL32rm:
case X86::BLSFILL64rr: case X86::BLSFILL64rm:
case X86::BLSIC32rr: case X86::BLSIC32rm:
case X86::BLSIC64rr: case X86::BLSIC64rm:
return true;
}
}
/// Check whether the use can be converted to remove a comparison against zero.
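/// For example, POPCNT and BSF set ZF exactly when their source is zero, and
/// LZCNT/TZCNT set CF when their source is zero, so a compare of the source
/// against zero can branch on those flags instead.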
static X86::CondCode isUseDefConvertible(MachineInstr &MI) {
switch (MI.getOpcode()) {
default: return X86::COND_INVALID;
case X86::LZCNT16rr: case X86::LZCNT16rm:
case X86::LZCNT32rr: case X86::LZCNT32rm:
case X86::LZCNT64rr: case X86::LZCNT64rm:
return X86::COND_B;
case X86::POPCNT16rr:case X86::POPCNT16rm:
case X86::POPCNT32rr:case X86::POPCNT32rm:
case X86::POPCNT64rr:case X86::POPCNT64rm:
return X86::COND_E;
case X86::TZCNT16rr: case X86::TZCNT16rm:
case X86::TZCNT32rr: case X86::TZCNT32rm:
case X86::TZCNT64rr: case X86::TZCNT64rm:
return X86::COND_B;
case X86::BSF16rr:
case X86::BSF16rm:
case X86::BSF32rr:
case X86::BSF32rm:
case X86::BSF64rr:
case X86::BSF64rm:
return X86::COND_E;
}
}
/// Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
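/// For example, in:
///   subl %esi, %edi
///   testl %edi, %edi
///   je .LBB0_2
/// the TEST is redundant because the SUB already set ZF, so it can be removed
/// and the JE left to consume the SUB's flags.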
bool X86InstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask,
int CmpValue,
const MachineRegisterInfo *MRI) const {
// Check whether we can replace SUB with CMP.
unsigned NewOpcode = 0;
switch (CmpInstr.getOpcode()) {
default: break;
case X86::SUB64ri32:
case X86::SUB64ri8:
case X86::SUB32ri:
case X86::SUB32ri8:
case X86::SUB16ri:
case X86::SUB16ri8:
case X86::SUB8ri:
case X86::SUB64rm:
case X86::SUB32rm:
case X86::SUB16rm:
case X86::SUB8rm:
case X86::SUB64rr:
case X86::SUB32rr:
case X86::SUB16rr:
case X86::SUB8rr: {
if (!MRI->use_nodbg_empty(CmpInstr.getOperand(0).getReg()))
return false;
// There is no use of the destination register, so we can replace SUB with CMP.
switch (CmpInstr.getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::SUB64rm: NewOpcode = X86::CMP64rm; break;
case X86::SUB32rm: NewOpcode = X86::CMP32rm; break;
case X86::SUB16rm: NewOpcode = X86::CMP16rm; break;
case X86::SUB8rm: NewOpcode = X86::CMP8rm; break;
case X86::SUB64rr: NewOpcode = X86::CMP64rr; break;
case X86::SUB32rr: NewOpcode = X86::CMP32rr; break;
case X86::SUB16rr: NewOpcode = X86::CMP16rr; break;
case X86::SUB8rr: NewOpcode = X86::CMP8rr; break;
case X86::SUB64ri32: NewOpcode = X86::CMP64ri32; break;
case X86::SUB64ri8: NewOpcode = X86::CMP64ri8; break;
case X86::SUB32ri: NewOpcode = X86::CMP32ri; break;
case X86::SUB32ri8: NewOpcode = X86::CMP32ri8; break;
case X86::SUB16ri: NewOpcode = X86::CMP16ri; break;
case X86::SUB16ri8: NewOpcode = X86::CMP16ri8; break;
case X86::SUB8ri: NewOpcode = X86::CMP8ri; break;
}
CmpInstr.setDesc(get(NewOpcode));
CmpInstr.RemoveOperand(0);
// Fall through to optimize Cmp if Cmp is CMPrr or CMPri.
if (NewOpcode == X86::CMP64rm || NewOpcode == X86::CMP32rm ||
NewOpcode == X86::CMP16rm || NewOpcode == X86::CMP8rm)
return false;
}
}
// Get the unique definition of SrcReg.
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
if (!MI) return false;
// CmpInstr is the first instruction of the BB.
MachineBasicBlock::iterator I = CmpInstr, Def = MI;
// If we are comparing against zero, check whether we can use MI to update
// EFLAGS. If MI is not in the same BB as CmpInstr, do not optimize.
bool IsCmpZero = (CmpMask != 0 && CmpValue == 0);
if (IsCmpZero && MI->getParent() != CmpInstr.getParent())
return false;
// If we have a use of the source register between the def and our compare
// instruction, we can eliminate the compare iff the use sets EFLAGS in the
// right way.
bool ShouldUpdateCC = false;
X86::CondCode NewCC = X86::COND_INVALID;
if (IsCmpZero && !isDefConvertible(*MI)) {
// Scan forward from the def until we hit the use we're looking for or the
// compare instruction.
for (MachineBasicBlock::iterator J = MI;; ++J) {
// Do we have a convertible instruction?
NewCC = isUseDefConvertible(*J);
if (NewCC != X86::COND_INVALID && J->getOperand(1).isReg() &&
J->getOperand(1).getReg() == SrcReg) {
assert(J->definesRegister(X86::EFLAGS) && "Must be an EFLAGS def!");
ShouldUpdateCC = true; // Update CC later on.
// This is not a def of SrcReg, but still a def of EFLAGS. Keep going
// with the new def.
Def = J;
MI = &*Def;
break;
}
if (J == I)
return false;
}
}
// We are searching for an earlier instruction that can make CmpInstr
// redundant; that instruction will be saved in Sub.
MachineInstr *Sub = nullptr;
const TargetRegisterInfo *TRI = &getRegisterInfo();
// We iterate backward, starting from the instruction before CmpInstr, and
// stop when reaching the definition of a source register or the start of
// the BB.
// RI points to the instruction before CmpInstr.
// If the definition is in this basic block, RE points to the definition;
// otherwise, RE is the rend of the basic block.
MachineBasicBlock::reverse_iterator
RI = ++I.getReverse(),
RE = CmpInstr.getParent() == MI->getParent()
? Def.getReverse() /* points to MI */
: CmpInstr.getParent()->rend();
MachineInstr *Movr0Inst = nullptr;
for (; RI != RE; ++RI) {
MachineInstr &Instr = *RI;
// Check whether CmpInstr can be made redundant by the current instruction.
if (!IsCmpZero && isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpMask,
CmpValue, Instr)) {
Sub = &Instr;
break;
}
if (Instr.modifiesRegister(X86::EFLAGS, TRI) ||
Instr.readsRegister(X86::EFLAGS, TRI)) {
// This instruction modifies or uses EFLAGS.
// MOV32r0 etc. are implemented with xor, which clobbers the condition codes.
// They are safe to move up if their definition of EFLAGS is dead and
// earlier instructions do not read or write EFLAGS.
if (!Movr0Inst && Instr.getOpcode() == X86::MOV32r0 &&
Instr.registerDefIsDead(X86::EFLAGS, TRI)) {
Movr0Inst = &Instr;
continue;
}
// We can't remove CmpInstr.
return false;
}
}
// Return false if no candidates exist.
if (!IsCmpZero && !Sub)
return false;
bool IsSwapped = (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
Sub->getOperand(2).getReg() == SrcReg);
// Scan forward from the instruction after CmpInstr for uses of EFLAGS.
// It is safe to remove CmpInstr if EFLAGS is redefined or killed.
// If we are done with the basic block, we need to check whether EFLAGS is
// live-out.
bool IsSafe = false;
SmallVector<std::pair<MachineInstr*, unsigned /*NewOpc*/>, 4> OpsToUpdate;
MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
for (++I; I != E; ++I) {
const MachineInstr &Instr = *I;
bool ModifyEFLAGS = Instr.modifiesRegister(X86::EFLAGS, TRI);
bool UseEFLAGS = Instr.readsRegister(X86::EFLAGS, TRI);
// We should check how EFLAGS is used if this instruction both uses and updates it.
if (!UseEFLAGS && ModifyEFLAGS) {
// It is safe to remove CmpInstr if EFLAGS is updated again.
IsSafe = true;
break;
}
if (!UseEFLAGS && !ModifyEFLAGS)
continue;
// EFLAGS is used by this instruction.
X86::CondCode OldCC = X86::COND_INVALID;
bool OpcIsSET = false;
if (IsCmpZero || IsSwapped) {
// We decode the condition code from the opcode.
if (Instr.isBranch())
OldCC = X86::getCondFromBranchOpc(Instr.getOpcode());
else {
OldCC = X86::getCondFromSETOpc(Instr.getOpcode());
if (OldCC != X86::COND_INVALID)
OpcIsSET = true;
else
OldCC = X86::getCondFromCMovOpc(Instr.getOpcode());
}
if (OldCC == X86::COND_INVALID) return false;
}
X86::CondCode ReplacementCC = X86::COND_INVALID;
if (IsCmpZero) {
switch (OldCC) {
default: break;
case X86::COND_A: case X86::COND_AE:
case X86::COND_B: case X86::COND_BE:
case X86::COND_G: case X86::COND_GE:
case X86::COND_L: case X86::COND_LE:
case X86::COND_O: case X86::COND_NO:
// CF or OF is used; we can't perform this optimization.
return false;
}
// If we're updating the condition code, check whether we have to reverse
// the condition.
if (ShouldUpdateCC)
switch (OldCC) {
default:
return false;
case X86::COND_E:
ReplacementCC = NewCC;
break;
case X86::COND_NE:
ReplacementCC = GetOppositeBranchCondition(NewCC);
break;
}
} else if (IsSwapped) {
// If we have SUB(r1, r2) and CMP(r2, r1), the condition code needs
// to be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
// We swap the condition code and synthesize the new opcode.
ReplacementCC = getSwappedCondition(OldCC);
if (ReplacementCC == X86::COND_INVALID) return false;
}
if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
// Synthesize the new opcode.
bool HasMemoryOperand = Instr.hasOneMemOperand();
unsigned NewOpc;
if (Instr.isBranch())
NewOpc = GetCondBranchFromCond(ReplacementCC);
else if(OpcIsSET)
NewOpc = getSETFromCond(ReplacementCC, HasMemoryOperand);
else {
unsigned DstReg = Instr.getOperand(0).getReg();
const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
NewOpc = getCMovFromCond(ReplacementCC, TRI->getRegSizeInBits(*DstRC)/8,
HasMemoryOperand);
}
// Push the MachineInstr to OpsToUpdate.
// If it is safe to remove CmpInstr, the condition code of these
// instructions will be modified.
OpsToUpdate.push_back(std::make_pair(&*I, NewOpc));
}
if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
// It is safe to remove CmpInstr if EFLAGS is updated again or killed.
IsSafe = true;
break;
}
}
// If EFLAGS is neither killed nor redefined, we should check whether it is
// live-out. If it is live-out, do not optimize.
if ((IsCmpZero || IsSwapped) && !IsSafe) {
MachineBasicBlock *MBB = CmpInstr.getParent();
for (MachineBasicBlock *Successor : MBB->successors())
if (Successor->isLiveIn(X86::EFLAGS))
return false;
}
// The instruction to be updated is either Sub or MI.
Sub = IsCmpZero ? MI : Sub;
// Move Movr0Inst to the appropriate place before Sub.
if (Movr0Inst) {
// Look backwards until we find a def that doesn't use the current EFLAGS.
Def = Sub;
MachineBasicBlock::reverse_iterator InsertI = Def.getReverse(),
InsertE = Sub->getParent()->rend();
for (; InsertI != InsertE; ++InsertI) {
MachineInstr *Instr = &*InsertI;
if (!Instr->readsRegister(X86::EFLAGS, TRI) &&
Instr->modifiesRegister(X86::EFLAGS, TRI)) {
Sub->getParent()->remove(Movr0Inst);
Instr->getParent()->insert(MachineBasicBlock::iterator(Instr),
Movr0Inst);
break;
}
}
if (InsertI == InsertE)
return false;
}
// Make sure the Sub instruction defines EFLAGS and mark the def live.
unsigned i = 0, e = Sub->getNumOperands();
for (; i != e; ++i) {
MachineOperand &MO = Sub->getOperand(i);
if (MO.isReg() && MO.isDef() && MO.getReg() == X86::EFLAGS) {
MO.setIsDead(false);
break;
}
}
assert(i != e && "Unable to locate a def EFLAGS operand");
CmpInstr.eraseFromParent();
// Modify the condition code of instructions in OpsToUpdate.
for (auto &Op : OpsToUpdate)
Op.first->setDesc(get(Op.second));
return true;
}
/// Try to remove the load by folding it into a register operand at the use.
/// We fold the load if it defines a virtual register, the virtual register is
/// used exactly once in the same BB, and the instructions in between do not
/// load, store, or have side effects.
MachineInstr *X86InstrInfo::optimizeLoadInstr(MachineInstr &MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
// Check whether we can move DefMI here.
DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
assert(DefMI);
bool SawStore = false;
if (!DefMI->isSafeToMove(nullptr, SawStore))
return nullptr;
// Collect information about virtual register operands of MI.
SmallVector<unsigned, 1> SrcOperandIds;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (Reg != FoldAsLoadDefReg)
continue;
// Do not fold if we have a subreg use or a def.
if (MO.getSubReg() || MO.isDef())
return nullptr;
SrcOperandIds.push_back(i);
}
if (SrcOperandIds.empty())
return nullptr;
// Check whether we can fold the def into the uses listed in SrcOperandIds.
if (MachineInstr *FoldMI = foldMemoryOperand(MI, SrcOperandIds, *DefMI)) {
FoldAsLoadDefReg = 0;
return FoldMI;
}
return nullptr;
}
/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two undef reads of the register being defined.
/// This is used for mapping:
/// %xmm4 = V_SET0
/// to:
/// %xmm4 = PXORrr undef %xmm4, undef %xmm4
///
static bool Expand2AddrUndef(MachineInstrBuilder &MIB,
const MCInstrDesc &Desc) {
assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
unsigned Reg = MIB->getOperand(0).getReg();
MIB->setDesc(Desc);
// MachineInstr::addOperand() will insert explicit operands before any
// implicit operands.
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
// But we don't trust that.
assert(MIB->getOperand(1).getReg() == Reg &&
MIB->getOperand(2).getReg() == Reg && "Misplaced operand");
return true;
}
/// Expand a single-def pseudo instruction to a two-addr
/// instruction with two %k0 reads.
/// This is used for mapping:
/// %k4 = K_SET1
/// to:
/// %k4 = KXNORrr %k0, %k0
static bool Expand2AddrKreg(MachineInstrBuilder &MIB,
const MCInstrDesc &Desc, unsigned Reg) {
assert(Desc.getNumOperands() == 3 && "Expected two-addr instruction.");
MIB->setDesc(Desc);
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef);
return true;
}
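// Expand MOV32r1/MOV32r_1: materialize +1/-1 without encoding an immediate by
// combining a dependency-breaking XOR zero idiom with an INC or DEC, e.g.:
//   xorl %eax, %eax
//   incl %eax        ; %eax = 1 (DEC32r is used for -1)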
static bool expandMOV32r1(MachineInstrBuilder &MIB, const TargetInstrInfo &TII,
bool MinusOne) {
MachineBasicBlock &MBB = *MIB->getParent();
DebugLoc DL = MIB->getDebugLoc();
unsigned Reg = MIB->getOperand(0).getReg();
// Insert the XOR.
BuildMI(MBB, MIB.getInstr(), DL, TII.get(X86::XOR32rr), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
// Turn the pseudo into an INC or DEC.
MIB->setDesc(TII.get(MinusOne ? X86::DEC32r : X86::INC32r));
MIB.addReg(Reg);
return true;
}
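// Expand MOV32ImmSExti8/MOV64ImmSExti8: materialize a sign-extended 8-bit
// immediate with a push/pop pair, which is shorter than a mov with a full
// 32/64-bit immediate, e.g.:
//   pushq $-1
//   popq %rax
// When DWARF CFI is emitted, the CFA offset is adjusted around the pair below.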
static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
const TargetInstrInfo &TII,
const X86Subtarget &Subtarget) {
MachineBasicBlock &MBB = *MIB->getParent();
DebugLoc DL = MIB->getDebugLoc();
int64_t Imm = MIB->getOperand(1).getImm();
assert(Imm != 0 && "Using push/pop for 0 is not efficient.");
MachineBasicBlock::iterator I = MIB.getInstr();
int StackAdjustment;
if (Subtarget.is64Bit()) {
assert(MIB->getOpcode() == X86::MOV64ImmSExti8 ||
MIB->getOpcode() == X86::MOV32ImmSExti8);
// Can't use push/pop lowering if the function might write to the red zone.
X86MachineFunctionInfo *X86FI =
MBB.getParent()->getInfo<X86MachineFunctionInfo>();
if (X86FI->getUsesRedZone()) {
MIB->setDesc(TII.get(MIB->getOpcode() ==
X86::MOV32ImmSExti8 ? X86::MOV32ri : X86::MOV64ri));
return true;
}
// 64-bit mode doesn't have 32-bit push/pop, so use 64-bit operations and
// widen the register if necessary.
StackAdjustment = 8;
BuildMI(MBB, I, DL, TII.get(X86::PUSH64i8)).addImm(Imm);
MIB->setDesc(TII.get(X86::POP64r));
MIB->getOperand(0)
.setReg(getX86SubSuperRegister(MIB->getOperand(0).getReg(), 64));
} else {
assert(MIB->getOpcode() == X86::MOV32ImmSExti8);
StackAdjustment = 4;
BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
MIB->setDesc(TII.get(X86::POP32r));
}
// Build CFI if necessary.
MachineFunction &MF = *MBB.getParent();
const X86FrameLowering *TFL = Subtarget.getFrameLowering();
bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
bool NeedsDwarfCFI =
!IsWin64Prologue &&
(MF.getMMI().hasDebugInfo() || MF.getFunction().needsUnwindTableEntry());
bool EmitCFI = !TFL->hasFP(MF) && NeedsDwarfCFI;
if (EmitCFI) {
TFL->BuildCFI(MBB, I, DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
TFL->BuildCFI(MBB, std::next(I), DL,
MCCFIInstruction::createAdjustCfaOffset(nullptr, -StackAdjustment));
}
return true;
}
// LoadStackGuard has so far only been implemented for 64-bit MachO. A
// different code sequence is needed for other targets.
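// The expansion loads the guard pointer from the GOT and then dereferences
// it, e.g. (with the Darwin guard symbol):
//   movq ___stack_chk_guard@GOTPCREL(%rip), %reg
//   movq (%reg), %reg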
static void expandLoadStackGuard(MachineInstrBuilder &MIB,
const TargetInstrInfo &TII) {
MachineBasicBlock &MBB = *MIB->getParent();
DebugLoc DL = MIB->getDebugLoc();
unsigned Reg = MIB->getOperand(0).getReg();
const GlobalValue *GV =
cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
auto Flags = MachineMemOperand::MOLoad |
MachineMemOperand::MODereferenceable |
MachineMemOperand::MOInvariant;
MachineMemOperand *MMO = MBB.getParent()->getMachineMemOperand(
MachinePointerInfo::getGOT(*MBB.getParent()), Flags, 8, 8);
MachineBasicBlock::iterator I = MIB.getInstr();
BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
.addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
.addMemOperand(MMO);
MIB->setDebugLoc(DL);
MIB->setDesc(TII.get(X86::MOV64rm));
MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}
static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
MachineBasicBlock &MBB = *MIB->getParent();
MachineFunction &MF = *MBB.getParent();
const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
unsigned XorOp =
MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
MIB->setDesc(TII.get(XorOp));
MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
return true;
}
// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register, we need an instruction that
// loads the lower 128/256 bits but is available with only AVX512F.
static bool expandNOVLXLoad(MachineInstrBuilder &MIB,
const TargetRegisterInfo *TRI,
const MCInstrDesc &LoadDesc,
const MCInstrDesc &BroadcastDesc,
unsigned SubIdx) {
unsigned DestReg = MIB->getOperand(0).getReg();
// Check if DestReg is XMM16-31 or YMM16-31.
if (TRI->getEncodingValue(DestReg) < 16) {
// We can use a normal VEX encoded load.
MIB->setDesc(LoadDesc);
} else {
// Use a 128/256-bit VBROADCAST instruction.
MIB->setDesc(BroadcastDesc);
// Change the destination to a 512-bit register.
DestReg = TRI->getMatchingSuperReg(DestReg, SubIdx, &X86::VR512RegClass);
MIB->getOperand(0).setReg(DestReg);
}
return true;
}
// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register, we need an instruction that
// stores the lower 128/256 bits but is available with only AVX512F.
static bool expandNOVLXStore(MachineInstrBuilder &MIB,
const TargetRegisterInfo *TRI,
const MCInstrDesc &StoreDesc,
const MCInstrDesc &ExtractDesc,
unsigned SubIdx) {
unsigned SrcReg = MIB->getOperand(X86::AddrNumOperands).getReg();
// Check if SrcReg is XMM16-31 or YMM16-31.
if (TRI->getEncodingValue(SrcReg) < 16) {
// We can use a normal VEX encoded store.
MIB->setDesc(StoreDesc);
} else {
// Use a VEXTRACTF instruction.
MIB->setDesc(ExtractDesc);
// Change the source to a 512-bit register.
SrcReg = TRI->getMatchingSuperReg(SrcReg, SubIdx, &X86::VR512RegClass);
MIB->getOperand(X86::AddrNumOperands).setReg(SrcReg);
MIB.addImm(0x0); // Append immediate to extract from the lower bits.
}
return true;
}
bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
bool HasAVX = Subtarget.hasAVX();
MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
switch (MI.getOpcode()) {
case X86::MOV32r0:
return Expand2AddrUndef(MIB, get(X86::XOR32rr));
case X86::MOV32r1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ false);
case X86::MOV32r_1:
return expandMOV32r1(MIB, *this, /*MinusOne=*/ true);
case X86::MOV32ImmSExti8:
case X86::MOV64ImmSExti8:
return ExpandMOVImmSExti8(MIB, *this, Subtarget);
case X86::SETB_C8r:
return Expand2AddrUndef(MIB, get(X86::SBB8rr));
case X86::SETB_C16r:
return Expand2AddrUndef(MIB, get(X86::SBB16rr));
case X86::SETB_C32r:
return Expand2AddrUndef(MIB, get(X86::SBB32rr));
case X86::SETB_C64r:
return Expand2AddrUndef(MIB, get(X86::SBB64rr));
case X86::MMX_SET0:
return Expand2AddrUndef(MIB, get(X86::MMX_PXORirr));
case X86::V_SET0:
case X86::FsFLD0SS:
case X86::FsFLD0SD:
return Expand2AddrUndef(MIB, get(HasAVX ? X86::VXORPSrr : X86::XORPSrr));
case X86::AVX_SET0: {
assert(HasAVX && "AVX not supported");
const TargetRegisterInfo *TRI = &getRegisterInfo();
unsigned SrcReg = MIB->getOperand(0).getReg();
unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
MIB->getOperand(0).setReg(XReg);
Expand2AddrUndef(MIB, get(X86::VXORPSrr));
MIB.addReg(SrcReg, RegState::ImplicitDefine);
return true;
}
case X86::AVX512_128_SET0:
case X86::AVX512_FsFLD0SS:
case X86::AVX512_FsFLD0SD: {
bool HasVLX = Subtarget.hasVLX();
unsigned SrcReg = MIB->getOperand(0).getReg();
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (HasVLX || TRI->getEncodingValue(SrcReg) < 16)
return Expand2AddrUndef(MIB,
get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
// Extended register without VLX. Use a larger XOR.
SrcReg =
TRI->getMatchingSuperReg(SrcReg, X86::sub_xmm, &X86::VR512RegClass);
MIB->getOperand(0).setReg(SrcReg);
return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
}
case X86::AVX512_256_SET0:
case X86::AVX512_512_SET0: {
bool HasVLX = Subtarget.hasVLX();
unsigned SrcReg = MIB->getOperand(0).getReg();
const TargetRegisterInfo *TRI = &getRegisterInfo();
if (HasVLX || TRI->getEncodingValue(SrcReg) < 16) {
unsigned XReg = TRI->getSubReg(SrcReg, X86::sub_xmm);
MIB->getOperand(0).setReg(XReg);
Expand2AddrUndef(MIB,
get(HasVLX ? X86::VPXORDZ128rr : X86::VXORPSrr));
MIB.addReg(SrcReg, RegState::ImplicitDefine);
return true;
}
return Expand2AddrUndef(MIB, get(X86::VPXORDZrr));
}
case X86::V_SETALLONES:
return Expand2AddrUndef(MIB, get(HasAVX ? X86::VPCMPEQDrr : X86::PCMPEQDrr));
case X86::AVX2_SETALLONES:
return Expand2AddrUndef(MIB, get(X86::VPCMPEQDYrr));
case X86::AVX1_SETALLONES: {
unsigned Reg = MIB->getOperand(0).getReg();
// VCMPPSYrri with an immediate 0xf should produce VCMPTRUEPS.
MIB->setDesc(get(X86::VCMPPSYrri));
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xf);
return true;
}
case X86::AVX512_512_SETALLONES: {
unsigned Reg = MIB->getOperand(0).getReg();
MIB->setDesc(get(X86::VPTERNLOGDZrri));
// VPTERNLOGD needs 3 register inputs and an immediate.
// 0xff will return 1s for any input.
MIB.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef).addImm(0xff);
return true;
}
case X86::AVX512_512_SEXT_MASK_32:
case X86::AVX512_512_SEXT_MASK_64: {
unsigned Reg = MIB->getOperand(0).getReg();
unsigned MaskReg = MIB->getOperand(1).getReg();
unsigned MaskState = getRegState(MIB->getOperand(1));
unsigned Opc = (MI.getOpcode() == X86::AVX512_512_SEXT_MASK_64) ?
X86::VPTERNLOGQZrrikz : X86::VPTERNLOGDZrrikz;
MI.RemoveOperand(1);
MIB->setDesc(get(Opc));
// VPTERNLOG needs 3 register inputs and an immediate.
// 0xff will return 1s for any input.
MIB.addReg(Reg, RegState::Undef).addReg(MaskReg, MaskState)
.addReg(Reg, RegState::Undef).addReg(Reg, RegState::Undef).addImm(0xff);
return true;
}
case X86::VMOVAPSZ128rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSrm),
get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
case X86::VMOVUPSZ128rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSrm),
get(X86::VBROADCASTF32X4rm), X86::sub_xmm);
case X86::VMOVAPSZ256rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVAPSYrm),
get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
case X86::VMOVUPSZ256rm_NOVLX:
return expandNOVLXLoad(MIB, &getRegisterInfo(), get(X86::VMOVUPSYrm),
get(X86::VBROADCASTF64X4rm), X86::sub_ymm);
case X86::VMOVAPSZ128mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSmr),
get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
case X86::VMOVUPSZ128mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSmr),
get(X86::VEXTRACTF32x4Zmr), X86::sub_xmm);
case X86::VMOVAPSZ256mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVAPSYmr),
get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
case X86::VMOVUPSZ256mr_NOVLX:
return expandNOVLXStore(MIB, &getRegisterInfo(), get(X86::VMOVUPSYmr),
get(X86::VEXTRACTF64x4Zmr), X86::sub_ymm);
case X86::MOV32ri64:
MI.setDesc(get(X86::MOV32ri));
return true;
// KNL does not recognize dependency-breaking idioms for mask registers,
// so kxnor %k1, %k1, %k2 has a RAW dependence on %k1.
// Using %k0 as the undef input register is a performance heuristic based
// on the assumption that %k0 is used less frequently than the other mask
// registers, since it is not usable as a write mask.
// FIXME: A more advanced approach would be to choose the best input mask
// register based on context.
case X86::KSET0W: return Expand2AddrKreg(MIB, get(X86::KXORWrr), X86::K0);
case X86::KSET0D: return Expand2AddrKreg(MIB, get(X86::KXORDrr), X86::K0);
case X86::KSET0Q: return Expand2AddrKreg(MIB, get(X86::KXORQrr), X86::K0);
case X86::KSET1W: return Expand2AddrKreg(MIB, get(X86::KXNORWrr), X86::K0);
case X86::KSET1D: return Expand2AddrKreg(MIB, get(X86::KXNORDrr), X86::K0);
case X86::KSET1Q: return Expand2AddrKreg(MIB, get(X86::KXNORQrr), X86::K0);
case TargetOpcode::LOAD_STACK_GUARD:
expandLoadStackGuard(MIB, *this);
return true;
case X86::XOR64_FP:
case X86::XOR32_FP:
return expandXorFP(MIB, *this);
}
return false;
}
/// Return true for all instructions that only update the first 32 or 64 bits
/// of the destination register and leave the rest unmodified. This can be
/// used to avoid folding loads if the instructions only update part of the
/// destination register and the non-updated part is not needed, e.g.
/// cvtss2sd, sqrtss. Unfolding the load from these instructions breaks the
/// partial register dependency and can improve performance, e.g.:
///
/// movss (%rdi), %xmm0
/// cvtss2sd %xmm0, %xmm0
///
/// Instead of
/// cvtss2sd (%rdi), %xmm0
///
/// FIXME: This should be turned into a TSFlags.
///
static bool hasPartialRegUpdate(unsigned Opcode,
const X86Subtarget &Subtarget) {
switch (Opcode) {
case X86::CVTSI2SSrr:
case X86::CVTSI2SSrm:
case X86::CVTSI642SSrr:
case X86::CVTSI642SSrm:
case X86::CVTSI2SDrr:
case X86::CVTSI2SDrm:
case X86::CVTSI642SDrr:
case X86::CVTSI642SDrm:
case X86::CVTSD2SSrr:
case X86::CVTSD2SSrm:
case X86::CVTSS2SDrr:
case X86::CVTSS2SDrm:
case X86::MOVHPDrm:
case X86::MOVHPSrm:
case X86::MOVLPDrm:
case X86::MOVLPSrm:
case X86::RCPSSr:
case X86::RCPSSm:
case X86::RCPSSr_Int:
case X86::RCPSSm_Int:
case X86::ROUNDSDr:
case X86::ROUNDSDm:
case X86::ROUNDSSr:
case X86::ROUNDSSm:
case X86::RSQRTSSr:
case X86::RSQRTSSm:
case X86::RSQRTSSr_Int:
case X86::RSQRTSSm_Int:
case X86::SQRTSSr:
case X86::SQRTSSm:
case X86::SQRTSSr_Int:
case X86::SQRTSSm_Int:
case X86::SQRTSDr:
case X86::SQRTSDm:
case X86::SQRTSDr_Int:
case X86::SQRTSDm_Int:
return true;
// GPR
case X86::POPCNT32rm:
case X86::POPCNT32rr:
case X86::POPCNT64rm:
case X86::POPCNT64rr:
return Subtarget.hasPOPCNTFalseDeps();
case X86::LZCNT32rm:
case X86::LZCNT32rr:
case X86::LZCNT64rm:
case X86::LZCNT64rr:
case X86::TZCNT32rm:
case X86::TZCNT32rr:
case X86::TZCNT64rm:
case X86::TZCNT64rr:
return Subtarget.hasLZCNTFalseDeps();
}
return false;
}
/// Inform the BreakFalseDeps pass how many idle
/// instructions we would like before a partial register update.
unsigned X86InstrInfo::getPartialRegUpdateClearance(
const MachineInstr &MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const {
if (OpNum != 0 || !hasPartialRegUpdate(MI.getOpcode(), Subtarget))
return 0;
// If MI is marked as reading Reg, the partial register update is wanted.
const MachineOperand &MO = MI.getOperand(0);
unsigned Reg = MO.getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
if (MO.readsReg() || MI.readsVirtualRegister(Reg))
return 0;
} else {
if (MI.readsRegister(Reg, TRI))
return 0;
}
// If any instructions in the clearance range are reading Reg, insert a
// dependency-breaking instruction, which is inexpensive and is likely to
// be hidden in other instructions' cycles.
return PartialRegUpdateClearance;
}
// Return true for any instruction that copies the high bits of the first source
// operand into the unused high bits of the destination operand.
static bool hasUndefRegUpdate(unsigned Opcode) {
switch (Opcode) {
case X86::VCVTSI2SSrr:
case X86::VCVTSI2SSrm:
case X86::VCVTSI2SSrr_Int:
case X86::VCVTSI2SSrm_Int:
case X86::VCVTSI642SSrr:
case X86::VCVTSI642SSrm:
case X86::VCVTSI642SSrr_Int:
case X86::VCVTSI642SSrm_Int:
case X86::VCVTSI2SDrr:
case X86::VCVTSI2SDrm:
case X86::VCVTSI2SDrr_Int:
case X86::VCVTSI2SDrm_Int:
case X86::VCVTSI642SDrr:
case X86::VCVTSI642SDrm:
case X86::VCVTSI642SDrr_Int:
case X86::VCVTSI642SDrm_Int:
case X86::VCVTSD2SSrr:
case X86::VCVTSD2SSrm:
case X86::VCVTSD2SSrr_Int:
case X86::VCVTSD2SSrm_Int:
case X86::VCVTSS2SDrr:
case X86::VCVTSS2SDrm:
case X86::VCVTSS2SDrr_Int:
case X86::VCVTSS2SDrm_Int:
case X86::VRCPSSr:
case X86::VRCPSSr_Int:
case X86::VRCPSSm:
case X86::VRCPSSm_Int:
case X86::VROUNDSDr:
case X86::VROUNDSDm:
case X86::VROUNDSDr_Int:
case X86::VROUNDSDm_Int:
case X86::VROUNDSSr:
case X86::VROUNDSSm:
case X86::VROUNDSSr_Int:
case X86::VROUNDSSm_Int:
case X86::VRSQRTSSr:
case X86::VRSQRTSSr_Int:
case X86::VRSQRTSSm:
case X86::VRSQRTSSm_Int:
case X86::VSQRTSSr:
case X86::VSQRTSSr_Int:
case X86::VSQRTSSm:
case X86::VSQRTSSm_Int:
case X86::VSQRTSDr:
case X86::VSQRTSDr_Int:
case X86::VSQRTSDm:
case X86::VSQRTSDm_Int:
// AVX-512
case X86::VCVTSI2SSZrr:
case X86::VCVTSI2SSZrm:
case X86::VCVTSI2SSZrr_Int:
case X86::VCVTSI2SSZrrb_Int:
case X86::VCVTSI2SSZrm_Int:
case X86::VCVTSI642SSZrr:
case X86::VCVTSI642SSZrm:
case X86::VCVTSI642SSZrr_Int:
case X86::VCVTSI642SSZrrb_Int:
case X86::VCVTSI642SSZrm_Int:
case X86::VCVTSI2SDZrr:
case X86::VCVTSI2SDZrm:
case X86::VCVTSI2SDZrr_Int:
case X86::VCVTSI2SDZrrb_Int:
case X86::VCVTSI2SDZrm_Int:
case X86::VCVTSI642SDZrr:
case X86::VCVTSI642SDZrm:
case X86::VCVTSI642SDZrr_Int:
case X86::VCVTSI642SDZrrb_Int:
case X86::VCVTSI642SDZrm_Int:
case X86::VCVTUSI2SSZrr:
case X86::VCVTUSI2SSZrm:
case X86::VCVTUSI2SSZrr_Int:
case X86::VCVTUSI2SSZrrb_Int:
case X86::VCVTUSI2SSZrm_Int:
case X86::VCVTUSI642SSZrr:
case X86::VCVTUSI642SSZrm:
case X86::VCVTUSI642SSZrr_Int:
case X86::VCVTUSI642SSZrrb_Int:
case X86::VCVTUSI642SSZrm_Int:
case X86::VCVTUSI2SDZrr:
case X86::VCVTUSI2SDZrm:
case X86::VCVTUSI2SDZrr_Int:
case X86::VCVTUSI2SDZrm_Int:
case X86::VCVTUSI642SDZrr:
case X86::VCVTUSI642SDZrm:
case X86::VCVTUSI642SDZrr_Int:
case X86::VCVTUSI642SDZrrb_Int:
case X86::VCVTUSI642SDZrm_Int:
case X86::VCVTSD2SSZrr:
case X86::VCVTSD2SSZrr_Int:
case X86::VCVTSD2SSZrrb_Int:
case X86::VCVTSD2SSZrm:
case X86::VCVTSD2SSZrm_Int:
case X86::VCVTSS2SDZrr:
case X86::VCVTSS2SDZrr_Int:
case X86::VCVTSS2SDZrrb_Int:
case X86::VCVTSS2SDZrm:
case X86::VCVTSS2SDZrm_Int:
case X86::VGETEXPSDZr:
case X86::VGETEXPSDZrb:
case X86::VGETEXPSDZm:
case X86::VGETEXPSSZr:
case X86::VGETEXPSSZrb:
case X86::VGETEXPSSZm:
case X86::VGETMANTSDZrri:
case X86::VGETMANTSDZrrib:
case X86::VGETMANTSDZrmi:
case X86::VGETMANTSSZrri:
case X86::VGETMANTSSZrrib:
case X86::VGETMANTSSZrmi:
case X86::VRNDSCALESDZr:
case X86::VRNDSCALESDZr_Int:
case X86::VRNDSCALESDZrb_Int:
case X86::VRNDSCALESDZm:
case X86::VRNDSCALESDZm_Int:
case X86::VRNDSCALESSZr:
case X86::VRNDSCALESSZr_Int:
case X86::VRNDSCALESSZrb_Int:
case X86::VRNDSCALESSZm:
case X86::VRNDSCALESSZm_Int:
case X86::VRCP14SDZrr:
case X86::VRCP14SDZrm:
case X86::VRCP14SSZrr:
case X86::VRCP14SSZrm:
case X86::VRCP28SDZr:
case X86::VRCP28SDZrb:
case X86::VRCP28SDZm:
case X86::VRCP28SSZr:
case X86::VRCP28SSZrb:
case X86::VRCP28SSZm:
case X86::VREDUCESSZrmi:
case X86::VREDUCESSZrri:
case X86::VREDUCESSZrrib:
case X86::VRSQRT14SDZrr:
case X86::VRSQRT14SDZrm:
case X86::VRSQRT14SSZrr:
case X86::VRSQRT14SSZrm:
case X86::VRSQRT28SDZr:
case X86::VRSQRT28SDZrb:
case X86::VRSQRT28SDZm:
case X86::VRSQRT28SSZr:
case X86::VRSQRT28SSZrb:
case X86::VRSQRT28SSZm:
case X86::VSQRTSSZr:
case X86::VSQRTSSZr_Int:
case X86::VSQRTSSZrb_Int:
case X86::VSQRTSSZm:
case X86::VSQRTSSZm_Int:
case X86::VSQRTSDZr:
case X86::VSQRTSDZr_Int:
case X86::VSQRTSDZrb_Int:
case X86::VSQRTSDZm:
case X86::VSQRTSDZm_Int:
return true;
}
return false;
}
/// Inform the BreakFalseDeps pass how many idle instructions we would like
/// before certain undef register reads.
///
/// This catches the VCVTSI2SD family of instructions:
///
/// vcvtsi2sdq %rax, undef %xmm0, %xmm14
///
/// We should be careful *not* to catch VXOR idioms which are presumably
/// handled specially in the pipeline:
///
/// vxorps undef %xmm1, undef %xmm1, %xmm1
///
/// Like getPartialRegUpdateClearance, this makes a strong assumption that the
/// high bits that are passed-through are not live.
unsigned
X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const {
if (!hasUndefRegUpdate(MI.getOpcode()))
return 0;
// Set the OpNum parameter to the first source operand.
OpNum = 1;
const MachineOperand &MO = MI.getOperand(OpNum);
if (MO.isUndef() && TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
return UndefRegClearance;
}
return 0;
}
void X86InstrInfo::breakPartialRegDependency(
MachineInstr &MI, unsigned OpNum, const TargetRegisterInfo *TRI) const {
unsigned Reg = MI.getOperand(OpNum).getReg();
// If MI kills this register, the false dependence is already broken.
if (MI.killsRegister(Reg, TRI))
return;
if (X86::VR128RegClass.contains(Reg)) {
// These instructions are all floating point domain, so xorps is the best
// choice.
unsigned Opc = Subtarget.hasAVX() ? X86::VXORPSrr : X86::XORPSrr;
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(Opc), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::VR256RegClass.contains(Reg)) {
// Use vxorps to clear the full ymm register.
// It wants to read and write the xmm sub-register.
unsigned XReg = TRI->getSubReg(Reg, X86::sub_xmm);
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::VXORPSrr), XReg)
.addReg(XReg, RegState::Undef)
.addReg(XReg, RegState::Undef)
.addReg(Reg, RegState::ImplicitDefine);
MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::GR64RegClass.contains(Reg)) {
    // Use XOR32rr because it has a shorter encoding and also zeros the
    // upper bits.
unsigned XReg = TRI->getSubReg(Reg, X86::sub_32bit);
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), XReg)
.addReg(XReg, RegState::Undef)
.addReg(XReg, RegState::Undef)
.addReg(Reg, RegState::ImplicitDefine);
MI.addRegisterKilled(Reg, TRI, true);
} else if (X86::GR32RegClass.contains(Reg)) {
BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(X86::XOR32rr), Reg)
.addReg(Reg, RegState::Undef)
.addReg(Reg, RegState::Undef);
MI.addRegisterKilled(Reg, TRI, true);
}
}
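// Illustrative result of breakPartialRegDependency (register number chosen
// arbitrarily): on an AVX target a false dependence on %xmm3 is broken by
// inserting
//   %xmm3 = VXORPSrr undef %xmm3, undef %xmm3
// before the dependent instruction; xor-zeroing idioms are recognized by the
// renamer on most modern cores and execute without reading the old value.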
static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs,
int PtrOffset = 0) {
unsigned NumAddrOps = MOs.size();
if (NumAddrOps < 4) {
    // FrameIndex only - add an immediate offset (whether it's zero or not).
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB.add(MOs[i]);
addOffset(MIB, PtrOffset);
} else {
// General Memory Addressing - we need to add any offset to an existing
// offset.
assert(MOs.size() == 5 && "Unexpected memory operand list length");
for (unsigned i = 0; i != NumAddrOps; ++i) {
const MachineOperand &MO = MOs[i];
if (i == 3 && PtrOffset != 0) {
MIB.addDisp(MO, PtrOffset);
} else {
MIB.add(MO);
}
}
}
}
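// Note on addOperands: a full x86 memory reference is X86::AddrNumOperands
// (5) operands, in the order base register, scale amount, index register,
// displacement, segment register; index 3 above is therefore the
// displacement that absorbs PtrOffset.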
static void updateOperandRegConstraints(MachineFunction &MF,
MachineInstr &NewMI,
const TargetInstrInfo &TII) {
MachineRegisterInfo &MRI = MF.getRegInfo();
const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
for (int Idx : llvm::seq<int>(0, NewMI.getNumOperands())) {
MachineOperand &MO = NewMI.getOperand(Idx);
// We only need to update constraints on virtual register operands.
if (!MO.isReg())
continue;
unsigned Reg = MO.getReg();
if (!TRI.isVirtualRegister(Reg))
continue;
auto *NewRC = MRI.constrainRegClass(
Reg, TII.getRegClass(NewMI.getDesc(), Idx, &TRI, MF));
if (!NewRC) {
LLVM_DEBUG(
dbgs() << "WARNING: Unable to update register constraint for operand "
<< Idx << " of instruction:\n";
NewMI.dump(); dbgs() << "\n");
}
}
}
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI,
const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part.
// Omit the implicit operands, something BuildMI can't do.
MachineInstr *NewMI =
MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
addOperands(MIB, MOs);
  // Loop over the remaining register/immediate operands, copying them over.
unsigned NumOps = MI.getDesc().getNumOperands() - 2;
for (unsigned i = 0; i != NumOps; ++i) {
MachineOperand &MO = MI.getOperand(i + 2);
MIB.add(MO);
}
for (unsigned i = NumOps + 2, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
MIB.add(MO);
}
updateOperandRegConstraints(MF, *NewMI, TII);
MachineBasicBlock *MBB = InsertPt->getParent();
MBB->insert(InsertPt, NewMI);
return MIB;
}
static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
unsigned OpNo, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI, const TargetInstrInfo &TII,
int PtrOffset = 0) {
// Omit the implicit operands, something BuildMI can't do.
MachineInstr *NewMI =
MF.CreateMachineInstr(TII.get(Opcode), MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI);
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (i == OpNo) {
assert(MO.isReg() && "Expected to fold into reg operand!");
addOperands(MIB, MOs, PtrOffset);
} else {
MIB.add(MO);
}
}
updateOperandRegConstraints(MF, *NewMI, TII);
MachineBasicBlock *MBB = InsertPt->getParent();
MBB->insert(InsertPt, NewMI);
return MIB;
}
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr &MI) {
MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
MI.getDebugLoc(), TII.get(Opcode));
addOperands(MIB, MOs);
return MIB.addImm(0);
}
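// MakeM0Inst is used below to rewrite MOV32r0 into a store of immediate zero,
// e.g. folding "%eax = MOV32r0" into a spill slot yields "MOV32mi slot, 0".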
MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align) const {
switch (MI.getOpcode()) {
case X86::INSERTPSrr:
case X86::VINSERTPSrr:
case X86::VINSERTPSZrr:
    // Attempt to convert the load of the inserted vector into a folded load
    // of a single float.
if (OpNum == 2) {
unsigned Imm = MI.getOperand(MI.getNumOperands() - 1).getImm();
unsigned ZMask = Imm & 15;
unsigned DstIdx = (Imm >> 4) & 3;
unsigned SrcIdx = (Imm >> 6) & 3;
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size <= RCSize && 4 <= Align) {
int PtrOffset = SrcIdx * 4;
unsigned NewImm = (DstIdx << 4) | ZMask;
unsigned NewOpCode =
(MI.getOpcode() == X86::VINSERTPSZrr) ? X86::VINSERTPSZrm :
(MI.getOpcode() == X86::VINSERTPSrr) ? X86::VINSERTPSrm :
X86::INSERTPSrm;
MachineInstr *NewMI =
FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, PtrOffset);
NewMI->getOperand(NewMI->getNumOperands() - 1).setImm(NewImm);
return NewMI;
}
}
break;
case X86::MOVHLPSrr:
case X86::VMOVHLPSrr:
case X86::VMOVHLPSZrr:
    // Move the upper 64 bits of the second operand to the lower 64 bits.
// To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
    // TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
if (OpNum == 2) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size <= RCSize && 8 <= Align) {
unsigned NewOpCode =
(MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
(MI.getOpcode() == X86::VMOVHLPSrr) ? X86::VMOVLPSrm :
X86::MOVLPSrm;
MachineInstr *NewMI =
FuseInst(MF, NewOpCode, OpNum, MOs, InsertPt, MI, *this, 8);
return NewMI;
}
}
break;
  }
return nullptr;
}
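// Illustrative INSERTPS fold from foldMemoryOperandCustom (a sketch): with
// SrcIdx == 2 the fused instruction loads one float from [addr + 8] instead
// of the whole source vector, and the immediate is rewritten so the inserted
// element comes from memory rather than from lane 2 of a register.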
static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
                                               MachineInstr &MI) {
if (MF.getFunction().optForSize() || !hasUndefRegUpdate(MI.getOpcode()) ||
!MI.getOperand(1).isReg())
return false;
  // There are two cases we need to handle depending on where in the pipeline
  // the folding attempt is being made.
  // - The register has the undef flag set.
  // - The register is produced by the IMPLICIT_DEF instruction.
if (MI.getOperand(1).isUndef())
return true;
MachineRegisterInfo &RegInfo = MF.getRegInfo();
MachineInstr *VRegDef = RegInfo.getUniqueVRegDef(MI.getOperand(1).getReg());
return VRegDef && VRegDef->isImplicitDef();
}
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, unsigned OpNum,
ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Align, bool AllowCommute) const {
bool isSlowTwoMemOps = Subtarget.slowTwoMemOps();
bool isTwoAddrFold = false;
// For CPUs that favor the register form of a call or push,
// do not fold loads into calls or pushes, unless optimizing for size
// aggressively.
if (isSlowTwoMemOps && !MF.getFunction().optForMinSize() &&
(MI.getOpcode() == X86::CALL32r || MI.getOpcode() == X86::CALL64r ||
MI.getOpcode() == X86::PUSH16r || MI.getOpcode() == X86::PUSH32r ||
MI.getOpcode() == X86::PUSH64r))
return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
if (!MF.getFunction().optForSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
unsigned NumOps = MI.getDesc().getNumOperands();
bool isTwoAddr =
NumOps > 1 && MI.getDesc().getOperandConstraint(1, MCOI::TIED_TO) != -1;
// FIXME: AsmPrinter doesn't know how to handle
// X86II::MO_GOT_ABSOLUTE_ADDRESS after folding.
if (MI.getOpcode() == X86::ADD32ri &&
MI.getOperand(2).getTargetFlags() == X86II::MO_GOT_ABSOLUTE_ADDRESS)
return nullptr;
// GOTTPOFF relocation loads can only be folded into add instructions.
// FIXME: Need to exclude other relocations that only support specific
// instructions.
if (MOs.size() == X86::AddrNumOperands &&
MOs[X86::AddrDisp].getTargetFlags() == X86II::MO_GOTTPOFF &&
MI.getOpcode() != X86::ADD64rr)
return nullptr;
MachineInstr *NewMI = nullptr;
// Attempt to fold any custom cases we have.
if (MachineInstr *CustomMI =
foldMemoryOperandCustom(MF, MI, OpNum, MOs, InsertPt, Size, Align))
return CustomMI;
const X86MemoryFoldTableEntry *I = nullptr;
// Folding a memory location into the two-address part of a two-address
  // instruction is different from folding it in other places. It requires
// replacing the *two* registers with the memory location.
if (isTwoAddr && NumOps >= 2 && OpNum < 2 && MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
MI.getOperand(0).getReg() == MI.getOperand(1).getReg()) {
I = lookupTwoAddrFoldTable(MI.getOpcode());
isTwoAddrFold = true;
} else {
if (OpNum == 0) {
if (MI.getOpcode() == X86::MOV32r0) {
NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
if (NewMI)
return NewMI;
}
}
I = lookupFoldTable(MI.getOpcode(), OpNum);
}
if (I != nullptr) {
unsigned Opcode = I->DstOp;
unsigned MinAlign = (I->Flags & TB_ALIGN_MASK) >> TB_ALIGN_SHIFT;
if (Align < MinAlign)
return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
&RI, MF);
unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size < RCSize) {
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
return nullptr;
        // If this is a 64-bit load, but the spill slot is 32 bits, then we can do
// a 32-bit load which is implicitly zero-extended. This likely is
// due to live interval analysis remat'ing a load from stack slot.
if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
return nullptr;
Opcode = X86::MOV32rm;
NarrowToMOV32rm = true;
}
}
if (isTwoAddrFold)
NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
else
NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
if (NarrowToMOV32rm) {
      // This is the special case where we use a MOV32rm to load a 32-bit
      // value and zero-extend the top bits; change the destination register
      // to a 32-bit one.
unsigned DstReg = NewMI->getOperand(0).getReg();
if (TargetRegisterInfo::isPhysicalRegister(DstReg))
NewMI->getOperand(0).setReg(RI.getSubReg(DstReg, X86::sub_32bit));
else
NewMI->getOperand(0).setSubReg(X86::sub_32bit);
}
return NewMI;
}
// If the instruction and target operand are commutable, commute the
// instruction and try again.
if (AllowCommute) {
unsigned CommuteOpIdx1 = OpNum, CommuteOpIdx2 = CommuteAnyOperandIndex;
if (findCommutedOpIndices(MI, CommuteOpIdx1, CommuteOpIdx2)) {
bool HasDef = MI.getDesc().getNumDefs();
unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
unsigned Reg1 = MI.getOperand(CommuteOpIdx1).getReg();
unsigned Reg2 = MI.getOperand(CommuteOpIdx2).getReg();
bool Tied1 =
0 == MI.getDesc().getOperandConstraint(CommuteOpIdx1, MCOI::TIED_TO);
bool Tied2 =
0 == MI.getDesc().getOperandConstraint(CommuteOpIdx2, MCOI::TIED_TO);
      // If either of the commutable operands is tied to the destination
      // then we cannot commute + fold.
if ((HasDef && Reg0 == Reg1 && Tied1) ||
(HasDef && Reg0 == Reg2 && Tied2))
return nullptr;
MachineInstr *CommutedMI =
commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
if (!CommutedMI) {
// Unable to commute.
return nullptr;
}
if (CommutedMI != &MI) {
// New instruction. We can't fold from this.
CommutedMI->eraseFromParent();
return nullptr;
}
// Attempt to fold with the commuted version of the instruction.
NewMI = foldMemoryOperandImpl(MF, MI, CommuteOpIdx2, MOs, InsertPt,
Size, Align, /*AllowCommute=*/false);
if (NewMI)
return NewMI;
// Folding failed again - undo the commute before returning.
MachineInstr *UncommutedMI =
commuteInstruction(MI, false, CommuteOpIdx1, CommuteOpIdx2);
if (!UncommutedMI) {
// Unable to commute.
return nullptr;
}
if (UncommutedMI != &MI) {
// New instruction. It doesn't need to be kept.
UncommutedMI->eraseFromParent();
return nullptr;
}
// Return here to prevent duplicate fuse failure report.
return nullptr;
}
}
// No fusion
if (PrintFailedFusing && !MI.isCopy())
dbgs() << "We failed to fuse operand " << OpNum << " in " << MI;
return nullptr;
}
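// Illustrative commute-and-fold path in foldMemoryOperandImpl above (a
// sketch): if the fold table has no entry for the requested operand of a
// commutable instruction, the two source operands are swapped and the fold is
// retried on the other index; if that also fails, the commute is undone so
// the caller sees the instruction unchanged.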
MachineInstr *
X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI,
ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex, LiveIntervals *LIS) const {
// Check switch flag
if (NoFusing)
return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
if (!MF.getFunction().optForSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
// Don't fold subreg spills, or reloads that use a high subreg.
for (auto Op : Ops) {
MachineOperand &MO = MI.getOperand(Op);
auto SubReg = MO.getSubReg();
if (SubReg && (MO.isDef() || SubReg == X86::sub_8bit_hi))
return nullptr;
}
const MachineFrameInfo &MFI = MF.getFrameInfo();
unsigned Size = MFI.getObjectSize(FrameIndex);
unsigned Alignment = MFI.getObjectAlignment(FrameIndex);
// If the function stack isn't realigned we don't want to fold instructions
// that need increased alignment.
if (!RI.needsStackRealignment(MF))
Alignment =
std::min(Alignment, Subtarget.getFrameLowering()->getStackAlignment());
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
unsigned RCSize = 0;
switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
}
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
if (Size < RCSize)
return nullptr;
// Change to CMPXXri r, 0 first.
MI.setDesc(get(NewOpc));
MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
return foldMemoryOperandImpl(MF, MI, Ops[0],
MachineOperand::CreateFI(FrameIndex), InsertPt,
Size, Alignment, /*AllowCommute=*/true);
}
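// For example (illustrative), folding a stack reload into both operands of
// "TEST32rr %eax, %eax" first rewrites the test to "CMP32ri8 %eax, 0" so the
// single remaining register operand can then be replaced with the
// frame-index address.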
/// Check if \p LoadMI is a partial register load that we can't fold into \p MI
/// because the latter uses contents that wouldn't be defined in the folded
/// version. For instance, this transformation isn't legal:
/// movss (%rdi), %xmm0
/// addps %xmm0, %xmm0
/// ->
/// addps (%rdi), %xmm0
///
/// But this one is:
/// movss (%rdi), %xmm0
/// addss %xmm0, %xmm0
/// ->
/// addss (%rdi), %xmm0
///
static bool isNonFoldablePartialRegisterLoad(const MachineInstr &LoadMI,
const MachineInstr &UserMI,
const MachineFunction &MF) {
unsigned Opc = LoadMI.getOpcode();
unsigned UserOpc = UserMI.getOpcode();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC =
MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
unsigned RegSize = TRI.getRegSizeInBits(*RC);
if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) &&
RegSize > 32) {
    // These instructions only load 32 bits; we can't fold them if the
    // destination register is wider than 32 bits (4 bytes) and its user
    // instruction isn't scalar (SS).
switch (UserOpc) {
case X86::ADDSSrr_Int: case X86::VADDSSrr_Int: case X86::VADDSSZrr_Int:
case X86::CMPSSrr_Int: case X86::VCMPSSrr_Int: case X86::VCMPSSZrr_Int:
case X86::DIVSSrr_Int: case X86::VDIVSSrr_Int: case X86::VDIVSSZrr_Int:
case X86::MAXSSrr_Int: case X86::VMAXSSrr_Int: case X86::VMAXSSZrr_Int:
case X86::MINSSrr_Int: case X86::VMINSSrr_Int: case X86::VMINSSZrr_Int:
case X86::MULSSrr_Int: case X86::VMULSSrr_Int: case X86::VMULSSZrr_Int:
case X86::SUBSSrr_Int: case X86::VSUBSSrr_Int: case X86::VSUBSSZrr_Int:
case X86::VADDSSZrr_Intk: case X86::VADDSSZrr_Intkz:
case X86::VDIVSSZrr_Intk: case X86::VDIVSSZrr_Intkz:
case X86::VMAXSSZrr_Intk: case X86::VMAXSSZrr_Intkz:
case X86::VMINSSZrr_Intk: case X86::VMINSSZrr_Intkz:
case X86::VMULSSZrr_Intk: case X86::VMULSSZrr_Intkz:
case X86::VSUBSSZrr_Intk: case X86::VSUBSSZrr_Intkz:
case X86::VFMADDSS4rr_Int: case X86::VFNMADDSS4rr_Int:
case X86::VFMSUBSS4rr_Int: case X86::VFNMSUBSS4rr_Int:
case X86::VFMADD132SSr_Int: case X86::VFNMADD132SSr_Int:
case X86::VFMADD213SSr_Int: case X86::VFNMADD213SSr_Int:
case X86::VFMADD231SSr_Int: case X86::VFNMADD231SSr_Int:
case X86::VFMSUB132SSr_Int: case X86::VFNMSUB132SSr_Int:
case X86::VFMSUB213SSr_Int: case X86::VFNMSUB213SSr_Int:
case X86::VFMSUB231SSr_Int: case X86::VFNMSUB231SSr_Int:
case X86::VFMADD132SSZr_Int: case X86::VFNMADD132SSZr_Int:
case X86::VFMADD213SSZr_Int: case X86::VFNMADD213SSZr_Int:
case X86::VFMADD231SSZr_Int: case X86::VFNMADD231SSZr_Int:
case X86::VFMSUB132SSZr_Int: case X86::VFNMSUB132SSZr_Int:
case X86::VFMSUB213SSZr_Int: case X86::VFNMSUB213SSZr_Int:
case X86::VFMSUB231SSZr_Int: case X86::VFNMSUB231SSZr_Int:
case X86::VFMADD132SSZr_Intk: case X86::VFNMADD132SSZr_Intk:
case X86::VFMADD213SSZr_Intk: case X86::VFNMADD213SSZr_Intk:
case X86::VFMADD231SSZr_Intk: case X86::VFNMADD231SSZr_Intk:
case X86::VFMSUB132SSZr_Intk: case X86::VFNMSUB132SSZr_Intk:
case X86::VFMSUB213SSZr_Intk: case X86::VFNMSUB213SSZr_Intk:
case X86::VFMSUB231SSZr_Intk: case X86::VFNMSUB231SSZr_Intk:
case X86::VFMADD132SSZr_Intkz: case X86::VFNMADD132SSZr_Intkz:
case X86::VFMADD213SSZr_Intkz: case X86::VFNMADD213SSZr_Intkz:
case X86::VFMADD231SSZr_Intkz: case X86::VFNMADD231SSZr_Intkz:
case X86::VFMSUB132SSZr_Intkz: case X86::VFNMSUB132SSZr_Intkz:
case X86::VFMSUB213SSZr_Intkz: case X86::VFNMSUB213SSZr_Intkz:
case X86::VFMSUB231SSZr_Intkz: case X86::VFNMSUB231SSZr_Intkz:
return false;
default:
return true;
}
}
if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm) &&
RegSize > 64) {
    // These instructions only load 64 bits; we can't fold them if the
    // destination register is wider than 64 bits (8 bytes) and its user
    // instruction isn't scalar (SD).
switch (UserOpc) {
case X86::ADDSDrr_Int: case X86::VADDSDrr_Int: case X86::VADDSDZrr_Int:
case X86::CMPSDrr_Int: case X86::VCMPSDrr_Int: case X86::VCMPSDZrr_Int:
case X86::DIVSDrr_Int: case X86::VDIVSDrr_Int: case X86::VDIVSDZrr_Int:
case X86::MAXSDrr_Int: case X86::VMAXSDrr_Int: case X86::VMAXSDZrr_Int:
case X86::MINSDrr_Int: case X86::VMINSDrr_Int: case X86::VMINSDZrr_Int:
case X86::MULSDrr_Int: case X86::VMULSDrr_Int: case X86::VMULSDZrr_Int:
case X86::SUBSDrr_Int: case X86::VSUBSDrr_Int: case X86::VSUBSDZrr_Int:
case X86::VADDSDZrr_Intk: case X86::VADDSDZrr_Intkz:
case X86::VDIVSDZrr_Intk: case X86::VDIVSDZrr_Intkz:
case X86::VMAXSDZrr_Intk: case X86::VMAXSDZrr_Intkz:
case X86::VMINSDZrr_Intk: case X86::VMINSDZrr_Intkz:
case X86::VMULSDZrr_Intk: case X86::VMULSDZrr_Intkz:
case X86::VSUBSDZrr_Intk: case X86::VSUBSDZrr_Intkz:
case X86::VFMADDSD4rr_Int: case X86::VFNMADDSD4rr_Int:
case X86::VFMSUBSD4rr_Int: case X86::VFNMSUBSD4rr_Int:
case X86::VFMADD132SDr_Int: case X86::VFNMADD132SDr_Int:
case X86::VFMADD213SDr_Int: case X86::VFNMADD213SDr_Int:
case X86::VFMADD231SDr_Int: case X86::VFNMADD231SDr_Int:
case X86::VFMSUB132SDr_Int: case X86::VFNMSUB132SDr_Int:
case X86::VFMSUB213SDr_Int: case X86::VFNMSUB213SDr_Int:
case X86::VFMSUB231SDr_Int: case X86::VFNMSUB231SDr_Int:
case X86::VFMADD132SDZr_Int: case X86::VFNMADD132SDZr_Int:
case X86::VFMADD213SDZr_Int: case X86::VFNMADD213SDZr_Int:
case X86::VFMADD231SDZr_Int: case X86::VFNMADD231SDZr_Int:
case X86::VFMSUB132SDZr_Int: case X86::VFNMSUB132SDZr_Int:
case X86::VFMSUB213SDZr_Int: case X86::VFNMSUB213SDZr_Int:
case X86::VFMSUB231SDZr_Int: case X86::VFNMSUB231SDZr_Int:
case X86::VFMADD132SDZr_Intk: case X86::VFNMADD132SDZr_Intk:
case X86::VFMADD213SDZr_Intk: case X86::VFNMADD213SDZr_Intk:
case X86::VFMADD231SDZr_Intk: case X86::VFNMADD231SDZr_Intk:
case X86::VFMSUB132SDZr_Intk: case X86::VFNMSUB132SDZr_Intk:
case X86::VFMSUB213SDZr_Intk: case X86::VFNMSUB213SDZr_Intk:
case X86::VFMSUB231SDZr_Intk: case X86::VFNMSUB231SDZr_Intk:
case X86::VFMADD132SDZr_Intkz: case X86::VFNMADD132SDZr_Intkz:
case X86::VFMADD213SDZr_Intkz: case X86::VFNMADD213SDZr_Intkz:
case X86::VFMADD231SDZr_Intkz: case X86::VFNMADD231SDZr_Intkz:
case X86::VFMSUB132SDZr_Intkz: case X86::VFNMSUB132SDZr_Intkz:
case X86::VFMSUB213SDZr_Intkz: case X86::VFNMSUB213SDZr_Intkz:
case X86::VFMSUB231SDZr_Intkz: case X86::VFNMSUB231SDZr_Intkz:
return false;
default:
return true;
}
}
return false;
}
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
LiveIntervals *LIS) const {
// TODO: Support the case where LoadMI loads a wide register, but MI
// only uses a subreg.
for (auto Op : Ops) {
if (MI.getOperand(Op).getSubReg())
return nullptr;
}
// If loading from a FrameIndex, fold directly from the FrameIndex.
unsigned NumOps = LoadMI.getDesc().getNumOperands();
int FrameIndex;
if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex, LIS);
}
// Check switch flag
if (NoFusing) return nullptr;
// Avoid partial and undef register update stalls unless optimizing for size.
if (!MF.getFunction().optForSize() &&
(hasPartialRegUpdate(MI.getOpcode(), Subtarget) ||
shouldPreventUndefRegUpdateMemFold(MF, MI)))
return nullptr;
// Determine the alignment of the load.
unsigned Alignment = 0;
if (LoadMI.hasOneMemOperand())
Alignment = (*LoadMI.memoperands_begin())->getAlignment();
else
switch (LoadMI.getOpcode()) {
case X86::AVX512_512_SET0:
case X86::AVX512_512_SETALLONES:
Alignment = 64;
break;
case X86::AVX2_SETALLONES:
case X86::AVX1_SETALLONES:
case X86::AVX_SET0:
case X86::AVX512_256_SET0:
Alignment = 32;
break;
case X86::V_SET0:
case X86::V_SETALLONES:
case X86::AVX512_128_SET0:
Alignment = 16;
break;
case X86::MMX_SET0:
case X86::FsFLD0SD:
case X86::AVX512_FsFLD0SD:
Alignment = 8;
break;
case X86::FsFLD0SS:
case X86::AVX512_FsFLD0SS:
Alignment = 4;
break;
default:
return nullptr;
}
if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
unsigned NewOpc = 0;
switch (MI.getOpcode()) {
default: return nullptr;
case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
}
// Change to CMPXXri r, 0 first.
MI.setDesc(get(NewOpc));
MI.getOperand(1).ChangeToImmediate(0);
} else if (Ops.size() != 1)
return nullptr;
// Make sure the subregisters match.
// Otherwise we risk changing the size of the load.
if (LoadMI.getOperand(0).getSubReg() != MI.getOperand(Ops[0]).getSubReg())
return nullptr;
SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
switch (LoadMI.getOpcode()) {
case X86::MMX_SET0:
case X86::V_SET0:
case X86::V_SETALLONES:
case X86::AVX2_SETALLONES:
case X86::AVX1_SETALLONES:
case X86::AVX_SET0:
case X86::AVX512_128_SET0:
case X86::AVX512_256_SET0:
case X86::AVX512_512_SET0:
case X86::AVX512_512_SETALLONES:
case X86::FsFLD0SD:
case X86::AVX512_FsFLD0SD:
case X86::FsFLD0SS:
case X86::AVX512_FsFLD0SS: {
// Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
// Create a constant-pool entry and operands to load from it.
    // Medium and large code models can't fold loads this way.
if (MF.getTarget().getCodeModel() != CodeModel::Small &&
MF.getTarget().getCodeModel() != CodeModel::Kernel)
return nullptr;
// x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0;
if (MF.getTarget().isPositionIndependent()) {
if (Subtarget.is64Bit())
PICBase = X86::RIP;
else
// FIXME: PICBase = getGlobalBaseReg(&MF);
// This doesn't work for several reasons.
// 1. GlobalBaseReg may have been spilled.
// 2. It may not be live at MI.
return nullptr;
}
// Create a constant-pool entry.
MachineConstantPool &MCP = *MF.getConstantPool();
Type *Ty;
unsigned Opc = LoadMI.getOpcode();
if (Opc == X86::FsFLD0SS || Opc == X86::AVX512_FsFLD0SS)
Ty = Type::getFloatTy(MF.getFunction().getContext());
else if (Opc == X86::FsFLD0SD || Opc == X86::AVX512_FsFLD0SD)
Ty = Type::getDoubleTy(MF.getFunction().getContext());
else if (Opc == X86::AVX512_512_SET0 || Opc == X86::AVX512_512_SETALLONES)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()),16);
else if (Opc == X86::AVX2_SETALLONES || Opc == X86::AVX_SET0 ||
Opc == X86::AVX512_256_SET0 || Opc == X86::AVX1_SETALLONES)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 8);
else if (Opc == X86::MMX_SET0)
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 2);
else
Ty = VectorType::get(Type::getInt32Ty(MF.getFunction().getContext()), 4);
bool IsAllOnes = (Opc == X86::V_SETALLONES || Opc == X86::AVX2_SETALLONES ||
Opc == X86::AVX512_512_SETALLONES ||
Opc == X86::AVX1_SETALLONES);
const Constant *C = IsAllOnes ? Constant::getAllOnesValue(Ty) :
Constant::getNullValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
// Create operands to load from the constant pool entry.
MOs.push_back(MachineOperand::CreateReg(PICBase, false));
MOs.push_back(MachineOperand::CreateImm(1));
MOs.push_back(MachineOperand::CreateReg(0, false));
MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
MOs.push_back(MachineOperand::CreateReg(0, false));
break;
}
default: {
if (isNonFoldablePartialRegisterLoad(LoadMI, MI, MF))
return nullptr;
// Folding a normal load. Just copy the load's address operands.
MOs.append(LoadMI.operands_begin() + NumOps - X86::AddrNumOperands,
LoadMI.operands_begin() + NumOps);
break;
}
}
return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
/*Size=*/0, Alignment, /*AllowCommute=*/true);
}
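// Illustrative constant-pool fold (a sketch): folding "%xmm0 = V_SET0" into a
// user replaces the register operand with a RIP-relative (or PIC-base) load
// of 16 zero bytes from a constant-pool entry, trading register pressure for
// a cheap, cacheable load.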
bool X86InstrInfo::unfoldMemoryOperand(
MachineFunction &MF, MachineInstr &MI, unsigned Reg, bool UnfoldLoad,
bool UnfoldStore, SmallVectorImpl<MachineInstr *> &NewMIs) const {
const X86MemoryFoldTableEntry *I = lookupUnfoldTable(MI.getOpcode());
if (I == nullptr)
return false;
unsigned Opc = I->DstOp;
unsigned Index = I->Flags & TB_INDEX_MASK;
bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
bool FoldedStore = I->Flags & TB_FOLDED_STORE;
if (UnfoldLoad && !FoldedLoad)
return false;
UnfoldLoad &= FoldedLoad;
if (UnfoldStore && !FoldedStore)
return false;
UnfoldStore &= FoldedStore;
const MCInstrDesc &MCID = get(Opc);
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
// TODO: Check if 32-byte or greater accesses are slow too?
if (!MI.hasOneMemOperand() && RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Without memoperands, loadRegFromAddr and storeRegToStackSlot will
// conservatively assume the address is unaligned. That's bad for
// performance.
return false;
SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
SmallVector<MachineOperand,2> BeforeOps;
SmallVector<MachineOperand,2> AfterOps;
SmallVector<MachineOperand,4> ImpOps;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &Op = MI.getOperand(i);
if (i >= Index && i < Index + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (Op.isReg() && Op.isImplicit())
ImpOps.push_back(Op);
else if (i < Index)
BeforeOps.push_back(Op);
else if (i > Index)
AfterOps.push_back(Op);
}
// Emit the load instruction.
if (UnfoldLoad) {
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end());
loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
if (UnfoldStore) {
// Address operands cannot be marked isKill.
for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
MachineOperand &MO = NewMIs[0]->getOperand(i);
if (MO.isReg())
MO.setIsKill(false);
}
}
}
// Emit the data processing instruction.
MachineInstr *DataMI = MF.CreateMachineInstr(MCID, MI.getDebugLoc(), true);
MachineInstrBuilder MIB(MF, DataMI);
if (FoldedStore)
MIB.addReg(Reg, RegState::Define);
for (MachineOperand &BeforeOp : BeforeOps)
MIB.add(BeforeOp);
if (FoldedLoad)
MIB.addReg(Reg);
for (MachineOperand &AfterOp : AfterOps)
MIB.add(AfterOp);
for (MachineOperand &ImpOp : ImpOps) {
MIB.addReg(ImpOp.getReg(),
getDefRegState(ImpOp.isDef()) |
RegState::Implicit |
getKillRegState(ImpOp.isKill()) |
getDeadRegState(ImpOp.isDead()) |
getUndefRegState(ImpOp.isUndef()));
}
// Change CMP32ri r, 0 back to TEST32rr r, r, etc.
switch (DataMI->getOpcode()) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
case X86::CMP32ri:
case X86::CMP32ri8:
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri: {
MachineOperand &MO0 = DataMI->getOperand(0);
MachineOperand &MO1 = DataMI->getOperand(1);
if (MO1.getImm() == 0) {
unsigned NewOpc;
switch (DataMI->getOpcode()) {
default: llvm_unreachable("Unreachable!");
case X86::CMP64ri8:
case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
case X86::CMP32ri8:
case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
case X86::CMP16ri8:
case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
}
DataMI->setDesc(get(NewOpc));
MO1.ChangeToRegister(MO0.getReg(), false);
}
}
}
NewMIs.push_back(DataMI);
// Emit the store instruction.
if (UnfoldStore) {
const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end());
storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
}
return true;
}
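// Illustrative unfold (a sketch, with Reg as the scratch register): an
// "ADD32rm %r, [mem]" unfolds into "MOV32rm %reg, [mem]" followed by
// "ADD32rr %r, %reg", re-materializing the load so it can be scheduled or
// spilled independently of the arithmetic.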
bool
X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const {
if (!N->isMachineOpcode())
return false;
const X86MemoryFoldTableEntry *I = lookupUnfoldTable(N->getMachineOpcode());
if (I == nullptr)
return false;
unsigned Opc = I->DstOp;
unsigned Index = I->Flags & TB_INDEX_MASK;
bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
bool FoldedStore = I->Flags & TB_FOLDED_STORE;
const MCInstrDesc &MCID = get(Opc);
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
std::vector<SDValue> BeforeOps;
std::vector<SDValue> AfterOps;
SDLoc dl(N);
unsigned NumOps = N->getNumOperands();
for (unsigned i = 0; i != NumOps-1; ++i) {
SDValue Op = N->getOperand(i);
if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
AddrOps.push_back(Op);
else if (i < Index-NumDefs)
BeforeOps.push_back(Op);
else if (i > Index-NumDefs)
AfterOps.push_back(Op);
}
SDValue Chain = N->getOperand(NumOps-1);
AddrOps.push_back(Chain);
// Emit the load instruction.
SDNode *Load = nullptr;
if (FoldedLoad) {
EVT VT = *TRI.legalclasstypes_begin(*RC);
std::pair<MachineInstr::mmo_iterator,
MachineInstr::mmo_iterator> MMOs =
MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
cast<MachineSDNode>(N)->memoperands_end());
if (!(*MMOs.first) &&
RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Do not introduce a slow unaligned load.
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl,
VT, MVT::Other, AddrOps);
NewNodes.push_back(Load);
// Preserve memory reference information.
cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
}
// Emit the data processing instruction.
std::vector<EVT> VTs;
const TargetRegisterClass *DstRC = nullptr;
if (MCID.getNumDefs() > 0) {
DstRC = getRegClass(MCID, 0, &RI, MF);
VTs.push_back(*TRI.legalclasstypes_begin(*DstRC));
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT != MVT::Other && i >= (unsigned)MCID.getNumDefs())
VTs.push_back(VT);
}
if (Load)
BeforeOps.push_back(SDValue(Load, 0));
BeforeOps.insert(BeforeOps.end(), AfterOps.begin(), AfterOps.end());
// Change CMP32ri r, 0 back to TEST32rr r, r, etc.
switch (Opc) {
default: break;
case X86::CMP64ri32:
case X86::CMP64ri8:
case X86::CMP32ri:
case X86::CMP32ri8:
case X86::CMP16ri:
case X86::CMP16ri8:
case X86::CMP8ri:
if (isNullConstant(BeforeOps[1])) {
switch (Opc) {
default: llvm_unreachable("Unreachable!");
case X86::CMP64ri8:
case X86::CMP64ri32: Opc = X86::TEST64rr; break;
case X86::CMP32ri8:
case X86::CMP32ri: Opc = X86::TEST32rr; break;
case X86::CMP16ri8:
case X86::CMP16ri: Opc = X86::TEST16rr; break;
case X86::CMP8ri: Opc = X86::TEST8rr; break;
}
BeforeOps[1] = BeforeOps[0];
}
}
  SDNode *NewNode = DAG.getMachineNode(Opc, dl, VTs, BeforeOps);
NewNodes.push_back(NewNode);
// Emit the store instruction.
if (FoldedStore) {
AddrOps.pop_back();
AddrOps.push_back(SDValue(NewNode, 0));
AddrOps.push_back(Chain);
std::pair<MachineInstr::mmo_iterator,
MachineInstr::mmo_iterator> MMOs =
MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
cast<MachineSDNode>(N)->memoperands_end());
if (!(*MMOs.first) &&
RC == &X86::VR128RegClass &&
Subtarget.isUnalignedMem16Slow())
// Do not introduce a slow unaligned store.
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
SDNode *Store =
DAG.getMachineNode(getStoreRegOpcode(0, DstRC, isAligned, Subtarget),
dl, MVT::Other, AddrOps);
NewNodes.push_back(Store);
// Preserve memory reference information.
cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second);
}
return true;
}
unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex) const {
const X86MemoryFoldTableEntry *I = lookupUnfoldTable(Opc);
if (I == nullptr)
return 0;
bool FoldedLoad = I->Flags & TB_FOLDED_LOAD;
bool FoldedStore = I->Flags & TB_FOLDED_STORE;
if (UnfoldLoad && !FoldedLoad)
return 0;
if (UnfoldStore && !FoldedStore)
return 0;
if (LoadRegIndex)
*LoadRegIndex = I->Flags & TB_INDEX_MASK;
return I->DstOp;
}
bool
X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
int64_t &Offset1, int64_t &Offset2) const {
if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
return false;
unsigned Opc1 = Load1->getMachineOpcode();
unsigned Opc2 = Load2->getMachineOpcode();
switch (Opc1) {
default: return false;
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOV64rm:
case X86::LD_Fp32m:
case X86::LD_Fp64m:
case X86::LD_Fp80m:
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
case X86::VMOVSSrm:
case X86::VMOVSDrm:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
// AVX512 load instructions
case X86::VMOVSSZrm:
case X86::VMOVSDZrm:
case X86::VMOVAPSZ128rm:
case X86::VMOVUPSZ128rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVAPDZ128rm:
case X86::VMOVUPDZ128rm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQU64Z128rm:
case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVAPDZ256rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQU64Z256rm:
case X86::VMOVAPSZrm:
case X86::VMOVUPSZrm:
case X86::VMOVAPDZrm:
case X86::VMOVUPDZrm:
case X86::VMOVDQU8Zrm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU64Zrm:
case X86::KMOVBkm:
case X86::KMOVWkm:
case X86::KMOVDkm:
case X86::KMOVQkm:
break;
}
switch (Opc2) {
default: return false;
case X86::MOV8rm:
case X86::MOV16rm:
case X86::MOV32rm:
case X86::MOV64rm:
case X86::LD_Fp32m:
case X86::LD_Fp64m:
case X86::LD_Fp80m:
case X86::MOVSSrm:
case X86::MOVSDrm:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
case X86::MOVAPSrm:
case X86::MOVUPSrm:
case X86::MOVAPDrm:
case X86::MOVUPDrm:
case X86::MOVDQArm:
case X86::MOVDQUrm:
// AVX load instructions
case X86::VMOVSSrm:
case X86::VMOVSDrm:
case X86::VMOVAPSrm:
case X86::VMOVUPSrm:
case X86::VMOVAPDrm:
case X86::VMOVUPDrm:
case X86::VMOVDQArm:
case X86::VMOVDQUrm:
case X86::VMOVAPSYrm:
case X86::VMOVUPSYrm:
case X86::VMOVAPDYrm:
case X86::VMOVUPDYrm:
case X86::VMOVDQAYrm:
case X86::VMOVDQUYrm:
// AVX512 load instructions
case X86::VMOVSSZrm:
case X86::VMOVSDZrm:
case X86::VMOVAPSZ128rm:
case X86::VMOVUPSZ128rm:
case X86::VMOVAPSZ128rm_NOVLX:
case X86::VMOVUPSZ128rm_NOVLX:
case X86::VMOVAPDZ128rm:
case X86::VMOVUPDZ128rm:
case X86::VMOVDQU8Z128rm:
case X86::VMOVDQU16Z128rm:
case X86::VMOVDQA32Z128rm:
case X86::VMOVDQU32Z128rm:
case X86::VMOVDQA64Z128rm:
case X86::VMOVDQU64Z128rm:
case X86::VMOVAPSZ256rm:
case X86::VMOVUPSZ256rm:
case X86::VMOVAPSZ256rm_NOVLX:
case X86::VMOVUPSZ256rm_NOVLX:
case X86::VMOVAPDZ256rm:
case X86::VMOVUPDZ256rm:
case X86::VMOVDQU8Z256rm:
case X86::VMOVDQU16Z256rm:
case X86::VMOVDQA32Z256rm:
case X86::VMOVDQU32Z256rm:
case X86::VMOVDQA64Z256rm:
case X86::VMOVDQU64Z256rm:
case X86::VMOVAPSZrm:
case X86::VMOVUPSZrm:
case X86::VMOVAPDZrm:
case X86::VMOVUPDZrm:
case X86::VMOVDQU8Zrm:
case X86::VMOVDQU16Zrm:
case X86::VMOVDQA32Zrm:
case X86::VMOVDQU32Zrm:
case X86::VMOVDQA64Zrm:
case X86::VMOVDQU64Zrm:
case X86::KMOVBkm:
case X86::KMOVWkm:
case X86::KMOVDkm:
case X86::KMOVQkm:
break;
}
  // Lambda to check if both loads have the same value for an operand index.
auto HasSameOp = [&](int I) {
return Load1->getOperand(I) == Load2->getOperand(I);
};
// All operands except the displacement should match.
if (!HasSameOp(X86::AddrBaseReg) || !HasSameOp(X86::AddrScaleAmt) ||
!HasSameOp(X86::AddrIndexReg) || !HasSameOp(X86::AddrSegmentReg))
return false;
  // The chain operand must be the same.
if (!HasSameOp(5))
return false;
// Now let's examine if the displacements are constants.
auto Disp1 = dyn_cast<ConstantSDNode>(Load1->getOperand(X86::AddrDisp));
auto Disp2 = dyn_cast<ConstantSDNode>(Load2->getOperand(X86::AddrDisp));
if (!Disp1 || !Disp2)
return false;
Offset1 = Disp1->getSExtValue();
Offset2 = Disp2->getSExtValue();
return true;
}
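// For example (illustrative), areLoadsFromSameBasePtr returns true with
// Offset1 = 0 and Offset2 = 16 for loads from [%rdi] and [%rdi + 16]: all
// other address operands and the chain match, and only the constant
// displacements differ.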
bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const {
assert(Offset2 > Offset1);
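  // A sketch of the arithmetic below: (Offset2 - Offset1) / 8 > 64 rejects
  // pairs whose displacements are more than roughly 512 bytes apart.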
if ((Offset2 - Offset1) / 8 > 64)
return false;
unsigned Opc1 = Load1->getMachineOpcode();
unsigned Opc2 = Load2->getMachineOpcode();
if (Opc1 != Opc2)
return false; // FIXME: overly conservative?
switch (Opc1) {
default: break;
case X86::LD_Fp32m:
case X86::LD_Fp64m:
case X86::LD_Fp80m:
case X86::MMX_MOVD64rm:
case X86::MMX_MOVQ64rm:
return false;
}
EVT VT = Load1->getValueType(0);
switch (VT.getSimpleVT().SimpleTy) {
default:
// XMM registers. In 64-bit mode we can be a bit more aggressive since we
// have 16 of them to play with.
if (Subtarget.is64Bit()) {
if (NumLoads >= 3)
return false;
} else if (NumLoads) {
return false;
}
break;
case MVT::i8:
case MVT::i16:
case MVT::i32:
case MVT::i64:
case MVT::f32:
case MVT::f64:
if (NumLoads)
return false;
break;
}
return true;
}
bool X86InstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
assert(Cond.size() == 1 && "Invalid X86 branch condition!");
X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
Cond[0].setImm(GetOppositeBranchCondition(CC));
return false;
}
bool X86InstrInfo::
isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
// FIXME: Return false for x87 stack register classes for now. We can't
// allow any loads of these registers before FpGet_ST0_80.
return !(RC == &X86::CCRRegClass || RC == &X86::DFCCRRegClass ||
RC == &X86::RFP32RegClass || RC == &X86::RFP64RegClass ||
RC == &X86::RFP80RegClass);
}
/// Return a virtual register initialized with the global base register
/// value. Output instructions required to initialize the register in the
/// function entry block, if necessary.
///
/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
///
unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
assert((!Subtarget.is64Bit() ||
MF->getTarget().getCodeModel() == CodeModel::Medium ||
MF->getTarget().getCodeModel() == CodeModel::Large) &&
"X86-64 PIC uses RIP relative addressing");
X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
if (GlobalBaseReg != 0)
return GlobalBaseReg;
// Create the register. The code to initialize it is inserted
// later, by the CGBR pass (below).
MachineRegisterInfo &RegInfo = MF->getRegInfo();
GlobalBaseReg = RegInfo.createVirtualRegister(
Subtarget.is64Bit() ? &X86::GR64_NOSPRegClass : &X86::GR32_NOSPRegClass);
X86FI->setGlobalBaseReg(GlobalBaseReg);
return GlobalBaseReg;
}
// These are the replaceable SSE instructions. Some of these have Int variants
// that we don't include here. We don't want to replace instructions selected
// by intrinsics.
static const uint16_t ReplaceableInstrs[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
{ X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
{ X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
{ X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
{ X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
{ X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
{ X86::MOVSDmr, X86::MOVSDmr, X86::MOVPQI2QImr },
{ X86::MOVSSmr, X86::MOVSSmr, X86::MOVPDI2DImr },
{ X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm },
{ X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm },
{ X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
{ X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
{ X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
{ X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm },
{ X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr },
{ X86::ORPSrm, X86::ORPDrm, X86::PORrm },
{ X86::ORPSrr, X86::ORPDrr, X86::PORrr },
{ X86::XORPSrm, X86::XORPDrm, X86::PXORrm },
{ X86::XORPSrr, X86::XORPDrr, X86::PXORrr },
{ X86::UNPCKLPDrm, X86::UNPCKLPDrm, X86::PUNPCKLQDQrm },
{ X86::MOVLHPSrr, X86::UNPCKLPDrr, X86::PUNPCKLQDQrr },
{ X86::UNPCKHPDrm, X86::UNPCKHPDrm, X86::PUNPCKHQDQrm },
{ X86::UNPCKHPDrr, X86::UNPCKHPDrr, X86::PUNPCKHQDQrr },
{ X86::UNPCKLPSrm, X86::UNPCKLPSrm, X86::PUNPCKLDQrm },
{ X86::UNPCKLPSrr, X86::UNPCKLPSrr, X86::PUNPCKLDQrr },
{ X86::UNPCKHPSrm, X86::UNPCKHPSrm, X86::PUNPCKHDQrm },
{ X86::UNPCKHPSrr, X86::UNPCKHPSrr, X86::PUNPCKHDQrr },
{ X86::EXTRACTPSmr, X86::EXTRACTPSmr, X86::PEXTRDmr },
{ X86::EXTRACTPSrr, X86::EXTRACTPSrr, X86::PEXTRDrr },
// AVX 128-bit support
{ X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr },
{ X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm },
{ X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
{ X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
{ X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
{ X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr },
{ X86::VMOVSDmr, X86::VMOVSDmr, X86::VMOVPQI2QImr },
{ X86::VMOVSSmr, X86::VMOVSSmr, X86::VMOVPDI2DImr },
{ X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm },
{ X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm },
{ X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
{ X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
{ X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
{ X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm },
{ X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr },
{ X86::VORPSrm, X86::VORPDrm, X86::VPORrm },
{ X86::VORPSrr, X86::VORPDrr, X86::VPORrr },
{ X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm },
{ X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr },
{ X86::VUNPCKLPDrm, X86::VUNPCKLPDrm, X86::VPUNPCKLQDQrm },
{ X86::VMOVLHPSrr, X86::VUNPCKLPDrr, X86::VPUNPCKLQDQrr },
{ X86::VUNPCKHPDrm, X86::VUNPCKHPDrm, X86::VPUNPCKHQDQrm },
{ X86::VUNPCKHPDrr, X86::VUNPCKHPDrr, X86::VPUNPCKHQDQrr },
{ X86::VUNPCKLPSrm, X86::VUNPCKLPSrm, X86::VPUNPCKLDQrm },
{ X86::VUNPCKLPSrr, X86::VUNPCKLPSrr, X86::VPUNPCKLDQrr },
{ X86::VUNPCKHPSrm, X86::VUNPCKHPSrm, X86::VPUNPCKHDQrm },
{ X86::VUNPCKHPSrr, X86::VUNPCKHPSrr, X86::VPUNPCKHDQrr },
{ X86::VEXTRACTPSmr, X86::VEXTRACTPSmr, X86::VPEXTRDmr },
{ X86::VEXTRACTPSrr, X86::VEXTRACTPSrr, X86::VPEXTRDrr },
// AVX 256-bit support
{ X86::VMOVAPSYmr, X86::VMOVAPDYmr, X86::VMOVDQAYmr },
{ X86::VMOVAPSYrm, X86::VMOVAPDYrm, X86::VMOVDQAYrm },
{ X86::VMOVAPSYrr, X86::VMOVAPDYrr, X86::VMOVDQAYrr },
{ X86::VMOVUPSYmr, X86::VMOVUPDYmr, X86::VMOVDQUYmr },
{ X86::VMOVUPSYrm, X86::VMOVUPDYrm, X86::VMOVDQUYrm },
{ X86::VMOVNTPSYmr, X86::VMOVNTPDYmr, X86::VMOVNTDQYmr },
{ X86::VPERMPSYrm, X86::VPERMPSYrm, X86::VPERMDYrm },
{ X86::VPERMPSYrr, X86::VPERMPSYrr, X86::VPERMDYrr },
{ X86::VPERMPDYmi, X86::VPERMPDYmi, X86::VPERMQYmi },
{ X86::VPERMPDYri, X86::VPERMPDYri, X86::VPERMQYri },
// AVX512 support
{ X86::VMOVLPSZ128mr, X86::VMOVLPDZ128mr, X86::VMOVPQI2QIZmr },
{ X86::VMOVNTPSZ128mr, X86::VMOVNTPDZ128mr, X86::VMOVNTDQZ128mr },
{ X86::VMOVNTPSZ256mr, X86::VMOVNTPDZ256mr, X86::VMOVNTDQZ256mr },
{ X86::VMOVNTPSZmr, X86::VMOVNTPDZmr, X86::VMOVNTDQZmr },
{ X86::VMOVSDZmr, X86::VMOVSDZmr, X86::VMOVPQI2QIZmr },
{ X86::VMOVSSZmr, X86::VMOVSSZmr, X86::VMOVPDI2DIZmr },
{ X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm },
{ X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm },
{ X86::VBROADCASTSSZ128r, X86::VBROADCASTSSZ128r, X86::VPBROADCASTDZ128r },
{ X86::VBROADCASTSSZ128m, X86::VBROADCASTSSZ128m, X86::VPBROADCASTDZ128m },
{ X86::VBROADCASTSSZ256r, X86::VBROADCASTSSZ256r, X86::VPBROADCASTDZ256r },
{ X86::VBROADCASTSSZ256m, X86::VBROADCASTSSZ256m, X86::VPBROADCASTDZ256m },
{ X86::VBROADCASTSSZr, X86::VBROADCASTSSZr, X86::VPBROADCASTDZr },
{ X86::VBROADCASTSSZm, X86::VBROADCASTSSZm, X86::VPBROADCASTDZm },
{ X86::VBROADCASTSDZ256r, X86::VBROADCASTSDZ256r, X86::VPBROADCASTQZ256r },
{ X86::VBROADCASTSDZ256m, X86::VBROADCASTSDZ256m, X86::VPBROADCASTQZ256m },
{ X86::VBROADCASTSDZr, X86::VBROADCASTSDZr, X86::VPBROADCASTQZr },
{ X86::VBROADCASTSDZm, X86::VBROADCASTSDZm, X86::VPBROADCASTQZm },
{ X86::VINSERTF32x4Zrr, X86::VINSERTF32x4Zrr, X86::VINSERTI32x4Zrr },
{ X86::VINSERTF32x4Zrm, X86::VINSERTF32x4Zrm, X86::VINSERTI32x4Zrm },
{ X86::VINSERTF32x8Zrr, X86::VINSERTF32x8Zrr, X86::VINSERTI32x8Zrr },
{ X86::VINSERTF32x8Zrm, X86::VINSERTF32x8Zrm, X86::VINSERTI32x8Zrm },
{ X86::VINSERTF64x2Zrr, X86::VINSERTF64x2Zrr, X86::VINSERTI64x2Zrr },
{ X86::VINSERTF64x2Zrm, X86::VINSERTF64x2Zrm, X86::VINSERTI64x2Zrm },
{ X86::VINSERTF64x4Zrr, X86::VINSERTF64x4Zrr, X86::VINSERTI64x4Zrr },
{ X86::VINSERTF64x4Zrm, X86::VINSERTF64x4Zrm, X86::VINSERTI64x4Zrm },
{ X86::VINSERTF32x4Z256rr,X86::VINSERTF32x4Z256rr,X86::VINSERTI32x4Z256rr },
{ X86::VINSERTF32x4Z256rm,X86::VINSERTF32x4Z256rm,X86::VINSERTI32x4Z256rm },
{ X86::VINSERTF64x2Z256rr,X86::VINSERTF64x2Z256rr,X86::VINSERTI64x2Z256rr },
{ X86::VINSERTF64x2Z256rm,X86::VINSERTF64x2Z256rm,X86::VINSERTI64x2Z256rm },
{ X86::VEXTRACTF32x4Zrr, X86::VEXTRACTF32x4Zrr, X86::VEXTRACTI32x4Zrr },
{ X86::VEXTRACTF32x4Zmr, X86::VEXTRACTF32x4Zmr, X86::VEXTRACTI32x4Zmr },
{ X86::VEXTRACTF32x8Zrr, X86::VEXTRACTF32x8Zrr, X86::VEXTRACTI32x8Zrr },
{ X86::VEXTRACTF32x8Zmr, X86::VEXTRACTF32x8Zmr, X86::VEXTRACTI32x8Zmr },
{ X86::VEXTRACTF64x2Zrr, X86::VEXTRACTF64x2Zrr, X86::VEXTRACTI64x2Zrr },
{ X86::VEXTRACTF64x2Zmr, X86::VEXTRACTF64x2Zmr, X86::VEXTRACTI64x2Zmr },
{ X86::VEXTRACTF64x4Zrr, X86::VEXTRACTF64x4Zrr, X86::VEXTRACTI64x4Zrr },
{ X86::VEXTRACTF64x4Zmr, X86::VEXTRACTF64x4Zmr, X86::VEXTRACTI64x4Zmr },
{ X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTF32x4Z256rr,X86::VEXTRACTI32x4Z256rr },
{ X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTF32x4Z256mr,X86::VEXTRACTI32x4Z256mr },
{ X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTF64x2Z256rr,X86::VEXTRACTI64x2Z256rr },
{ X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTF64x2Z256mr,X86::VEXTRACTI64x2Z256mr },
{ X86::VPERMILPSmi, X86::VPERMILPSmi, X86::VPSHUFDmi },
{ X86::VPERMILPSri, X86::VPERMILPSri, X86::VPSHUFDri },
{ X86::VPERMILPSZ128mi, X86::VPERMILPSZ128mi, X86::VPSHUFDZ128mi },
{ X86::VPERMILPSZ128ri, X86::VPERMILPSZ128ri, X86::VPSHUFDZ128ri },
{ X86::VPERMILPSZ256mi, X86::VPERMILPSZ256mi, X86::VPSHUFDZ256mi },
{ X86::VPERMILPSZ256ri, X86::VPERMILPSZ256ri, X86::VPSHUFDZ256ri },
{ X86::VPERMILPSZmi, X86::VPERMILPSZmi, X86::VPSHUFDZmi },
{ X86::VPERMILPSZri, X86::VPERMILPSZri, X86::VPSHUFDZri },
{ X86::VPERMPSZ256rm, X86::VPERMPSZ256rm, X86::VPERMDZ256rm },
{ X86::VPERMPSZ256rr, X86::VPERMPSZ256rr, X86::VPERMDZ256rr },
{ X86::VPERMPDZ256mi, X86::VPERMPDZ256mi, X86::VPERMQZ256mi },
{ X86::VPERMPDZ256ri, X86::VPERMPDZ256ri, X86::VPERMQZ256ri },
{ X86::VPERMPDZ256rm, X86::VPERMPDZ256rm, X86::VPERMQZ256rm },
{ X86::VPERMPDZ256rr, X86::VPERMPDZ256rr, X86::VPERMQZ256rr },
{ X86::VPERMPSZrm, X86::VPERMPSZrm, X86::VPERMDZrm },
{ X86::VPERMPSZrr, X86::VPERMPSZrr, X86::VPERMDZrr },
{ X86::VPERMPDZmi, X86::VPERMPDZmi, X86::VPERMQZmi },
{ X86::VPERMPDZri, X86::VPERMPDZri, X86::VPERMQZri },
{ X86::VPERMPDZrm, X86::VPERMPDZrm, X86::VPERMQZrm },
{ X86::VPERMPDZrr, X86::VPERMPDZrr, X86::VPERMQZrr },
{ X86::VUNPCKLPDZ256rm, X86::VUNPCKLPDZ256rm, X86::VPUNPCKLQDQZ256rm },
{ X86::VUNPCKLPDZ256rr, X86::VUNPCKLPDZ256rr, X86::VPUNPCKLQDQZ256rr },
{ X86::VUNPCKHPDZ256rm, X86::VUNPCKHPDZ256rm, X86::VPUNPCKHQDQZ256rm },
{ X86::VUNPCKHPDZ256rr, X86::VUNPCKHPDZ256rr, X86::VPUNPCKHQDQZ256rr },
{ X86::VUNPCKLPSZ256rm, X86::VUNPCKLPSZ256rm, X86::VPUNPCKLDQZ256rm },
{ X86::VUNPCKLPSZ256rr, X86::VUNPCKLPSZ256rr, X86::VPUNPCKLDQZ256rr },
{ X86::VUNPCKHPSZ256rm, X86::VUNPCKHPSZ256rm, X86::VPUNPCKHDQZ256rm },
{ X86::VUNPCKHPSZ256rr, X86::VUNPCKHPSZ256rr, X86::VPUNPCKHDQZ256rr },
{ X86::VUNPCKLPDZ128rm, X86::VUNPCKLPDZ128rm, X86::VPUNPCKLQDQZ128rm },
{ X86::VMOVLHPSZrr, X86::VUNPCKLPDZ128rr, X86::VPUNPCKLQDQZ128rr },
{ X86::VUNPCKHPDZ128rm, X86::VUNPCKHPDZ128rm, X86::VPUNPCKHQDQZ128rm },
{ X86::VUNPCKHPDZ128rr, X86::VUNPCKHPDZ128rr, X86::VPUNPCKHQDQZ128rr },
{ X86::VUNPCKLPSZ128rm, X86::VUNPCKLPSZ128rm, X86::VPUNPCKLDQZ128rm },
{ X86::VUNPCKLPSZ128rr, X86::VUNPCKLPSZ128rr, X86::VPUNPCKLDQZ128rr },
{ X86::VUNPCKHPSZ128rm, X86::VUNPCKHPSZ128rm, X86::VPUNPCKHDQZ128rm },
{ X86::VUNPCKHPSZ128rr, X86::VUNPCKHPSZ128rr, X86::VPUNPCKHDQZ128rr },
{ X86::VUNPCKLPDZrm, X86::VUNPCKLPDZrm, X86::VPUNPCKLQDQZrm },
{ X86::VUNPCKLPDZrr, X86::VUNPCKLPDZrr, X86::VPUNPCKLQDQZrr },
{ X86::VUNPCKHPDZrm, X86::VUNPCKHPDZrm, X86::VPUNPCKHQDQZrm },
{ X86::VUNPCKHPDZrr, X86::VUNPCKHPDZrr, X86::VPUNPCKHQDQZrr },
{ X86::VUNPCKLPSZrm, X86::VUNPCKLPSZrm, X86::VPUNPCKLDQZrm },
{ X86::VUNPCKLPSZrr, X86::VUNPCKLPSZrr, X86::VPUNPCKLDQZrr },
{ X86::VUNPCKHPSZrm, X86::VUNPCKHPSZrm, X86::VPUNPCKHDQZrm },
{ X86::VUNPCKHPSZrr, X86::VUNPCKHPSZrr, X86::VPUNPCKHDQZrr },
{ X86::VEXTRACTPSZmr, X86::VEXTRACTPSZmr, X86::VPEXTRDZmr },
{ X86::VEXTRACTPSZrr, X86::VEXTRACTPSZrr, X86::VPEXTRDZrr },
};
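// For example (illustrative), the { XORPSrr, XORPDrr, PXORrr } row above
// groups three encodings of the same 128-bit bitwise XOR; these tables let
// the execution-domain fixup pass substitute whichever form avoids a
// domain-crossing bypass delay.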
static const uint16_t ReplaceableInstrsAVX2[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNYrm },
{ X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNYrr },
{ X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDYrm },
{ X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDYrr },
{ X86::VORPSYrm, X86::VORPDYrm, X86::VPORYrm },
{ X86::VORPSYrr, X86::VORPDYrr, X86::VPORYrr },
{ X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORYrm },
{ X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORYrr },
{ X86::VPERM2F128rm, X86::VPERM2F128rm, X86::VPERM2I128rm },
{ X86::VPERM2F128rr, X86::VPERM2F128rr, X86::VPERM2I128rr },
{ X86::VBROADCASTSSrm, X86::VBROADCASTSSrm, X86::VPBROADCASTDrm},
{ X86::VBROADCASTSSrr, X86::VBROADCASTSSrr, X86::VPBROADCASTDrr},
{ X86::VBROADCASTSSYrr, X86::VBROADCASTSSYrr, X86::VPBROADCASTDYrr},
{ X86::VBROADCASTSSYrm, X86::VBROADCASTSSYrm, X86::VPBROADCASTDYrm},
{ X86::VBROADCASTSDYrr, X86::VBROADCASTSDYrr, X86::VPBROADCASTQYrr},
{ X86::VBROADCASTSDYrm, X86::VBROADCASTSDYrm, X86::VPBROADCASTQYrm},
{ X86::VBROADCASTF128, X86::VBROADCASTF128, X86::VBROADCASTI128 },
{ X86::VBLENDPSYrri, X86::VBLENDPSYrri, X86::VPBLENDDYrri },
{ X86::VBLENDPSYrmi, X86::VBLENDPSYrmi, X86::VPBLENDDYrmi },
{ X86::VPERMILPSYmi, X86::VPERMILPSYmi, X86::VPSHUFDYmi },
{ X86::VPERMILPSYri, X86::VPERMILPSYri, X86::VPSHUFDYri },
{ X86::VUNPCKLPDYrm, X86::VUNPCKLPDYrm, X86::VPUNPCKLQDQYrm },
{ X86::VUNPCKLPDYrr, X86::VUNPCKLPDYrr, X86::VPUNPCKLQDQYrr },
{ X86::VUNPCKHPDYrm, X86::VUNPCKHPDYrm, X86::VPUNPCKHQDQYrm },
{ X86::VUNPCKHPDYrr, X86::VUNPCKHPDYrr, X86::VPUNPCKHQDQYrr },
{ X86::VUNPCKLPSYrm, X86::VUNPCKLPSYrm, X86::VPUNPCKLDQYrm },
{ X86::VUNPCKLPSYrr, X86::VUNPCKLPSYrr, X86::VPUNPCKLDQYrr },
{ X86::VUNPCKHPSYrm, X86::VUNPCKHPSYrm, X86::VPUNPCKHDQYrm },
{ X86::VUNPCKHPSYrr, X86::VUNPCKHPSYrr, X86::VPUNPCKHDQYrr },
};
static const uint16_t ReplaceableInstrsAVX2InsertExtract[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::VEXTRACTF128mr, X86::VEXTRACTF128mr, X86::VEXTRACTI128mr },
{ X86::VEXTRACTF128rr, X86::VEXTRACTF128rr, X86::VEXTRACTI128rr },
{ X86::VINSERTF128rm, X86::VINSERTF128rm, X86::VINSERTI128rm },
{ X86::VINSERTF128rr, X86::VINSERTF128rr, X86::VINSERTI128rr },
};
static const uint16_t ReplaceableInstrsAVX512[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble PackedInt PackedInt
{ X86::VMOVAPSZ128mr, X86::VMOVAPDZ128mr, X86::VMOVDQA64Z128mr, X86::VMOVDQA32Z128mr },
{ X86::VMOVAPSZ128rm, X86::VMOVAPDZ128rm, X86::VMOVDQA64Z128rm, X86::VMOVDQA32Z128rm },
{ X86::VMOVAPSZ128rr, X86::VMOVAPDZ128rr, X86::VMOVDQA64Z128rr, X86::VMOVDQA32Z128rr },
{ X86::VMOVUPSZ128mr, X86::VMOVUPDZ128mr, X86::VMOVDQU64Z128mr, X86::VMOVDQU32Z128mr },
{ X86::VMOVUPSZ128rm, X86::VMOVUPDZ128rm, X86::VMOVDQU64Z128rm, X86::VMOVDQU32Z128rm },
{ X86::VMOVAPSZ256mr, X86::VMOVAPDZ256mr, X86::VMOVDQA64Z256mr, X86::VMOVDQA32Z256mr },
{ X86::VMOVAPSZ256rm, X86::VMOVAPDZ256rm, X86::VMOVDQA64Z256rm, X86::VMOVDQA32Z256rm },
{ X86::VMOVAPSZ256rr, X86::VMOVAPDZ256rr, X86::VMOVDQA64Z256rr, X86::VMOVDQA32Z256rr },
{ X86::VMOVUPSZ256mr, X86::VMOVUPDZ256mr, X86::VMOVDQU64Z256mr, X86::VMOVDQU32Z256mr },
{ X86::VMOVUPSZ256rm, X86::VMOVUPDZ256rm, X86::VMOVDQU64Z256rm, X86::VMOVDQU32Z256rm },
{ X86::VMOVAPSZmr, X86::VMOVAPDZmr, X86::VMOVDQA64Zmr, X86::VMOVDQA32Zmr },
{ X86::VMOVAPSZrm, X86::VMOVAPDZrm, X86::VMOVDQA64Zrm, X86::VMOVDQA32Zrm },
{ X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr },
{ X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr },
{ X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm },
};
static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble PackedInt PackedInt
{ X86::VANDNPSZ128rm, X86::VANDNPDZ128rm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
{ X86::VANDNPSZ128rr, X86::VANDNPDZ128rr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
{ X86::VANDPSZ128rm, X86::VANDPDZ128rm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
{ X86::VANDPSZ128rr, X86::VANDPDZ128rr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
{ X86::VORPSZ128rm, X86::VORPDZ128rm, X86::VPORQZ128rm, X86::VPORDZ128rm },
{ X86::VORPSZ128rr, X86::VORPDZ128rr, X86::VPORQZ128rr, X86::VPORDZ128rr },
{ X86::VXORPSZ128rm, X86::VXORPDZ128rm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
{ X86::VXORPSZ128rr, X86::VXORPDZ128rr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
{ X86::VANDNPSZ256rm, X86::VANDNPDZ256rm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
{ X86::VANDNPSZ256rr, X86::VANDNPDZ256rr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
{ X86::VANDPSZ256rm, X86::VANDPDZ256rm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
{ X86::VANDPSZ256rr, X86::VANDPDZ256rr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
{ X86::VORPSZ256rm, X86::VORPDZ256rm, X86::VPORQZ256rm, X86::VPORDZ256rm },
{ X86::VORPSZ256rr, X86::VORPDZ256rr, X86::VPORQZ256rr, X86::VPORDZ256rr },
{ X86::VXORPSZ256rm, X86::VXORPDZ256rm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
{ X86::VXORPSZ256rr, X86::VXORPDZ256rr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
{ X86::VANDNPSZrm, X86::VANDNPDZrm, X86::VPANDNQZrm, X86::VPANDNDZrm },
{ X86::VANDNPSZrr, X86::VANDNPDZrr, X86::VPANDNQZrr, X86::VPANDNDZrr },
{ X86::VANDPSZrm, X86::VANDPDZrm, X86::VPANDQZrm, X86::VPANDDZrm },
{ X86::VANDPSZrr, X86::VANDPDZrr, X86::VPANDQZrr, X86::VPANDDZrr },
{ X86::VORPSZrm, X86::VORPDZrm, X86::VPORQZrm, X86::VPORDZrm },
{ X86::VORPSZrr, X86::VORPDZrr, X86::VPORQZrr, X86::VPORDZrr },
{ X86::VXORPSZrm, X86::VXORPDZrm, X86::VPXORQZrm, X86::VPXORDZrm },
{ X86::VXORPSZrr, X86::VXORPDZrr, X86::VPXORQZrr, X86::VPXORDZrr },
};
static const uint16_t ReplaceableInstrsAVX512DQMasked[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble
//PackedInt PackedInt
{ X86::VANDNPSZ128rmk, X86::VANDNPDZ128rmk,
X86::VPANDNQZ128rmk, X86::VPANDNDZ128rmk },
{ X86::VANDNPSZ128rmkz, X86::VANDNPDZ128rmkz,
X86::VPANDNQZ128rmkz, X86::VPANDNDZ128rmkz },
{ X86::VANDNPSZ128rrk, X86::VANDNPDZ128rrk,
X86::VPANDNQZ128rrk, X86::VPANDNDZ128rrk },
{ X86::VANDNPSZ128rrkz, X86::VANDNPDZ128rrkz,
X86::VPANDNQZ128rrkz, X86::VPANDNDZ128rrkz },
{ X86::VANDPSZ128rmk, X86::VANDPDZ128rmk,
X86::VPANDQZ128rmk, X86::VPANDDZ128rmk },
{ X86::VANDPSZ128rmkz, X86::VANDPDZ128rmkz,
X86::VPANDQZ128rmkz, X86::VPANDDZ128rmkz },
{ X86::VANDPSZ128rrk, X86::VANDPDZ128rrk,
X86::VPANDQZ128rrk, X86::VPANDDZ128rrk },
{ X86::VANDPSZ128rrkz, X86::VANDPDZ128rrkz,
X86::VPANDQZ128rrkz, X86::VPANDDZ128rrkz },
{ X86::VORPSZ128rmk, X86::VORPDZ128rmk,
X86::VPORQZ128rmk, X86::VPORDZ128rmk },
{ X86::VORPSZ128rmkz, X86::VORPDZ128rmkz,
X86::VPORQZ128rmkz, X86::VPORDZ128rmkz },
{ X86::VORPSZ128rrk, X86::VORPDZ128rrk,
X86::VPORQZ128rrk, X86::VPORDZ128rrk },
{ X86::VORPSZ128rrkz, X86::VORPDZ128rrkz,
X86::VPORQZ128rrkz, X86::VPORDZ128rrkz },
{ X86::VXORPSZ128rmk, X86::VXORPDZ128rmk,
X86::VPXORQZ128rmk, X86::VPXORDZ128rmk },
{ X86::VXORPSZ128rmkz, X86::VXORPDZ128rmkz,
X86::VPXORQZ128rmkz, X86::VPXORDZ128rmkz },
{ X86::VXORPSZ128rrk, X86::VXORPDZ128rrk,
X86::VPXORQZ128rrk, X86::VPXORDZ128rrk },
{ X86::VXORPSZ128rrkz, X86::VXORPDZ128rrkz,
X86::VPXORQZ128rrkz, X86::VPXORDZ128rrkz },
{ X86::VANDNPSZ256rmk, X86::VANDNPDZ256rmk,
X86::VPANDNQZ256rmk, X86::VPANDNDZ256rmk },
{ X86::VANDNPSZ256rmkz, X86::VANDNPDZ256rmkz,
X86::VPANDNQZ256rmkz, X86::VPANDNDZ256rmkz },
{ X86::VANDNPSZ256rrk, X86::VANDNPDZ256rrk,
X86::VPANDNQZ256rrk, X86::VPANDNDZ256rrk },
{ X86::VANDNPSZ256rrkz, X86::VANDNPDZ256rrkz,
X86::VPANDNQZ256rrkz, X86::VPANDNDZ256rrkz },
{ X86::VANDPSZ256rmk, X86::VANDPDZ256rmk,
X86::VPANDQZ256rmk, X86::VPANDDZ256rmk },
{ X86::VANDPSZ256rmkz, X86::VANDPDZ256rmkz,
X86::VPANDQZ256rmkz, X86::VPANDDZ256rmkz },
{ X86::VANDPSZ256rrk, X86::VANDPDZ256rrk,
X86::VPANDQZ256rrk, X86::VPANDDZ256rrk },
{ X86::VANDPSZ256rrkz, X86::VANDPDZ256rrkz,
X86::VPANDQZ256rrkz, X86::VPANDDZ256rrkz },
{ X86::VORPSZ256rmk, X86::VORPDZ256rmk,
X86::VPORQZ256rmk, X86::VPORDZ256rmk },
{ X86::VORPSZ256rmkz, X86::VORPDZ256rmkz,
X86::VPORQZ256rmkz, X86::VPORDZ256rmkz },
{ X86::VORPSZ256rrk, X86::VORPDZ256rrk,
X86::VPORQZ256rrk, X86::VPORDZ256rrk },
{ X86::VORPSZ256rrkz, X86::VORPDZ256rrkz,
X86::VPORQZ256rrkz, X86::VPORDZ256rrkz },
{ X86::VXORPSZ256rmk, X86::VXORPDZ256rmk,
X86::VPXORQZ256rmk, X86::VPXORDZ256rmk },
{ X86::VXORPSZ256rmkz, X86::VXORPDZ256rmkz,
X86::VPXORQZ256rmkz, X86::VPXORDZ256rmkz },
{ X86::VXORPSZ256rrk, X86::VXORPDZ256rrk,
X86::VPXORQZ256rrk, X86::VPXORDZ256rrk },
{ X86::VXORPSZ256rrkz, X86::VXORPDZ256rrkz,
X86::VPXORQZ256rrkz, X86::VPXORDZ256rrkz },
{ X86::VANDNPSZrmk, X86::VANDNPDZrmk,
X86::VPANDNQZrmk, X86::VPANDNDZrmk },
{ X86::VANDNPSZrmkz, X86::VANDNPDZrmkz,
X86::VPANDNQZrmkz, X86::VPANDNDZrmkz },
{ X86::VANDNPSZrrk, X86::VANDNPDZrrk,
X86::VPANDNQZrrk, X86::VPANDNDZrrk },
{ X86::VANDNPSZrrkz, X86::VANDNPDZrrkz,
X86::VPANDNQZrrkz, X86::VPANDNDZrrkz },
{ X86::VANDPSZrmk, X86::VANDPDZrmk,
X86::VPANDQZrmk, X86::VPANDDZrmk },
{ X86::VANDPSZrmkz, X86::VANDPDZrmkz,
X86::VPANDQZrmkz, X86::VPANDDZrmkz },
{ X86::VANDPSZrrk, X86::VANDPDZrrk,
X86::VPANDQZrrk, X86::VPANDDZrrk },
{ X86::VANDPSZrrkz, X86::VANDPDZrrkz,
X86::VPANDQZrrkz, X86::VPANDDZrrkz },
{ X86::VORPSZrmk, X86::VORPDZrmk,
X86::VPORQZrmk, X86::VPORDZrmk },
{ X86::VORPSZrmkz, X86::VORPDZrmkz,
X86::VPORQZrmkz, X86::VPORDZrmkz },
{ X86::VORPSZrrk, X86::VORPDZrrk,
X86::VPORQZrrk, X86::VPORDZrrk },
{ X86::VORPSZrrkz, X86::VORPDZrrkz,
X86::VPORQZrrkz, X86::VPORDZrrkz },
{ X86::VXORPSZrmk, X86::VXORPDZrmk,
X86::VPXORQZrmk, X86::VPXORDZrmk },
{ X86::VXORPSZrmkz, X86::VXORPDZrmkz,
X86::VPXORQZrmkz, X86::VPXORDZrmkz },
{ X86::VXORPSZrrk, X86::VXORPDZrrk,
X86::VPXORQZrrk, X86::VPXORDZrrk },
{ X86::VXORPSZrrkz, X86::VXORPDZrrkz,
X86::VPXORQZrrkz, X86::VPXORDZrrkz },
// Broadcast loads can be handled the same as masked operations to avoid
// changing element size.
{ X86::VANDNPSZ128rmb, X86::VANDNPDZ128rmb,
X86::VPANDNQZ128rmb, X86::VPANDNDZ128rmb },
{ X86::VANDPSZ128rmb, X86::VANDPDZ128rmb,
X86::VPANDQZ128rmb, X86::VPANDDZ128rmb },
{ X86::VORPSZ128rmb, X86::VORPDZ128rmb,
X86::VPORQZ128rmb, X86::VPORDZ128rmb },
{ X86::VXORPSZ128rmb, X86::VXORPDZ128rmb,
X86::VPXORQZ128rmb, X86::VPXORDZ128rmb },
{ X86::VANDNPSZ256rmb, X86::VANDNPDZ256rmb,
X86::VPANDNQZ256rmb, X86::VPANDNDZ256rmb },
{ X86::VANDPSZ256rmb, X86::VANDPDZ256rmb,
X86::VPANDQZ256rmb, X86::VPANDDZ256rmb },
{ X86::VORPSZ256rmb, X86::VORPDZ256rmb,
X86::VPORQZ256rmb, X86::VPORDZ256rmb },
{ X86::VXORPSZ256rmb, X86::VXORPDZ256rmb,
X86::VPXORQZ256rmb, X86::VPXORDZ256rmb },
{ X86::VANDNPSZrmb, X86::VANDNPDZrmb,
X86::VPANDNQZrmb, X86::VPANDNDZrmb },
{ X86::VANDPSZrmb, X86::VANDPDZrmb,
X86::VPANDQZrmb, X86::VPANDDZrmb },
{ X86::VORPSZrmb, X86::VORPDZrmb,
X86::VPORQZrmb, X86::VPORDZrmb },
{ X86::VXORPSZrmb, X86::VXORPDZrmb,
X86::VPXORQZrmb, X86::VPXORDZrmb },
{ X86::VANDNPSZ128rmbk, X86::VANDNPDZ128rmbk,
X86::VPANDNQZ128rmbk, X86::VPANDNDZ128rmbk },
{ X86::VANDPSZ128rmbk, X86::VANDPDZ128rmbk,
X86::VPANDQZ128rmbk, X86::VPANDDZ128rmbk },
{ X86::VORPSZ128rmbk, X86::VORPDZ128rmbk,
X86::VPORQZ128rmbk, X86::VPORDZ128rmbk },
{ X86::VXORPSZ128rmbk, X86::VXORPDZ128rmbk,
X86::VPXORQZ128rmbk, X86::VPXORDZ128rmbk },
{ X86::VANDNPSZ256rmbk, X86::VANDNPDZ256rmbk,
X86::VPANDNQZ256rmbk, X86::VPANDNDZ256rmbk },
{ X86::VANDPSZ256rmbk, X86::VANDPDZ256rmbk,
X86::VPANDQZ256rmbk, X86::VPANDDZ256rmbk },
{ X86::VORPSZ256rmbk, X86::VORPDZ256rmbk,
X86::VPORQZ256rmbk, X86::VPORDZ256rmbk },
{ X86::VXORPSZ256rmbk, X86::VXORPDZ256rmbk,
X86::VPXORQZ256rmbk, X86::VPXORDZ256rmbk },
{ X86::VANDNPSZrmbk, X86::VANDNPDZrmbk,
X86::VPANDNQZrmbk, X86::VPANDNDZrmbk },
{ X86::VANDPSZrmbk, X86::VANDPDZrmbk,
X86::VPANDQZrmbk, X86::VPANDDZrmbk },
{ X86::VORPSZrmbk, X86::VORPDZrmbk,
X86::VPORQZrmbk, X86::VPORDZrmbk },
{ X86::VXORPSZrmbk, X86::VXORPDZrmbk,
X86::VPXORQZrmbk, X86::VPXORDZrmbk },
{ X86::VANDNPSZ128rmbkz,X86::VANDNPDZ128rmbkz,
X86::VPANDNQZ128rmbkz,X86::VPANDNDZ128rmbkz},
{ X86::VANDPSZ128rmbkz, X86::VANDPDZ128rmbkz,
X86::VPANDQZ128rmbkz, X86::VPANDDZ128rmbkz },
{ X86::VORPSZ128rmbkz, X86::VORPDZ128rmbkz,
X86::VPORQZ128rmbkz, X86::VPORDZ128rmbkz },
{ X86::VXORPSZ128rmbkz, X86::VXORPDZ128rmbkz,
X86::VPXORQZ128rmbkz, X86::VPXORDZ128rmbkz },
{ X86::VANDNPSZ256rmbkz,X86::VANDNPDZ256rmbkz,
X86::VPANDNQZ256rmbkz,X86::VPANDNDZ256rmbkz},
{ X86::VANDPSZ256rmbkz, X86::VANDPDZ256rmbkz,
X86::VPANDQZ256rmbkz, X86::VPANDDZ256rmbkz },
{ X86::VORPSZ256rmbkz, X86::VORPDZ256rmbkz,
X86::VPORQZ256rmbkz, X86::VPORDZ256rmbkz },
{ X86::VXORPSZ256rmbkz, X86::VXORPDZ256rmbkz,
X86::VPXORQZ256rmbkz, X86::VPXORDZ256rmbkz },
{ X86::VANDNPSZrmbkz, X86::VANDNPDZrmbkz,
X86::VPANDNQZrmbkz, X86::VPANDNDZrmbkz },
{ X86::VANDPSZrmbkz, X86::VANDPDZrmbkz,
X86::VPANDQZrmbkz, X86::VPANDDZrmbkz },
{ X86::VORPSZrmbkz, X86::VORPDZrmbkz,
X86::VPORQZrmbkz, X86::VPORDZrmbkz },
{ X86::VXORPSZrmbkz, X86::VXORPDZrmbkz,
X86::VPXORQZrmbkz, X86::VPXORDZrmbkz },
};
// NOTE: These should only be used by the custom domain methods.
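// Blends need custom handling because switching domains also means rescaling
// the immediate mask between element widths (see AdjustBlendMask below), not
// just swapping opcodes.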
static const uint16_t ReplaceableCustomInstrs[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::BLENDPSrmi, X86::BLENDPDrmi, X86::PBLENDWrmi },
{ X86::BLENDPSrri, X86::BLENDPDrri, X86::PBLENDWrri },
{ X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDWrmi },
{ X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDWrri },
{ X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDWYrmi },
{ X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDWYrri },
};
static const uint16_t ReplaceableCustomAVX2Instrs[][3] = {
//PackedSingle PackedDouble PackedInt
{ X86::VBLENDPSrmi, X86::VBLENDPDrmi, X86::VPBLENDDrmi },
{ X86::VBLENDPSrri, X86::VBLENDPDrri, X86::VPBLENDDrri },
{ X86::VBLENDPSYrmi, X86::VBLENDPDYrmi, X86::VPBLENDDYrmi },
{ X86::VBLENDPSYrri, X86::VBLENDPDYrri, X86::VPBLENDDYrri },
};
// Special table for changing EVEX logic instructions to VEX.
// TODO: Should we run EVEX->VEX earlier?
static const uint16_t ReplaceableCustomAVX512LogicInstrs[][4] = {
// Two integer columns for 64-bit and 32-bit elements.
//PackedSingle PackedDouble PackedInt PackedInt
{ X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNQZ128rm, X86::VPANDNDZ128rm },
{ X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNQZ128rr, X86::VPANDNDZ128rr },
{ X86::VANDPSrm, X86::VANDPDrm, X86::VPANDQZ128rm, X86::VPANDDZ128rm },
{ X86::VANDPSrr, X86::VANDPDrr, X86::VPANDQZ128rr, X86::VPANDDZ128rr },
{ X86::VORPSrm, X86::VORPDrm, X86::VPORQZ128rm, X86::VPORDZ128rm },
{ X86::VORPSrr, X86::VORPDrr, X86::VPORQZ128rr, X86::VPORDZ128rr },
{ X86::VXORPSrm, X86::VXORPDrm, X86::VPXORQZ128rm, X86::VPXORDZ128rm },
{ X86::VXORPSrr, X86::VXORPDrr, X86::VPXORQZ128rr, X86::VPXORDZ128rr },
{ X86::VANDNPSYrm, X86::VANDNPDYrm, X86::VPANDNQZ256rm, X86::VPANDNDZ256rm },
{ X86::VANDNPSYrr, X86::VANDNPDYrr, X86::VPANDNQZ256rr, X86::VPANDNDZ256rr },
{ X86::VANDPSYrm, X86::VANDPDYrm, X86::VPANDQZ256rm, X86::VPANDDZ256rm },
{ X86::VANDPSYrr, X86::VANDPDYrr, X86::VPANDQZ256rr, X86::VPANDDZ256rr },
{ X86::VORPSYrm, X86::VORPDYrm, X86::VPORQZ256rm, X86::VPORDZ256rm },
{ X86::VORPSYrr, X86::VORPDYrr, X86::VPORQZ256rr, X86::VPORDZ256rr },
{ X86::VXORPSYrm, X86::VXORPDYrm, X86::VPXORQZ256rm, X86::VPXORDZ256rm },
{ X86::VXORPSYrr, X86::VXORPDYrr, X86::VPXORQZ256rr, X86::VPXORDZ256rr },
};
// FIXME: Some shuffle and unpack instructions have equivalents in different
// domains, but they require a bit more work than just switching opcodes.
static const uint16_t *lookup(unsigned opcode, unsigned domain,
ArrayRef<uint16_t[3]> Table) {
for (const uint16_t (&Row)[3] : Table)
if (Row[domain-1] == opcode)
return Row;
return nullptr;
}
static const uint16_t *lookupAVX512(unsigned opcode, unsigned domain,
ArrayRef<uint16_t[4]> Table) {
// If this is the integer domain, make sure to check both integer columns.
for (const uint16_t (&Row)[4] : Table)
if (Row[domain-1] == opcode || (domain == 3 && Row[3] == opcode))
return Row;
return nullptr;
}
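// Illustrative note: domain is the SSEDomainShift encoding (1 = PackedSingle,
// 2 = PackedDouble, 3 = PackedInt), so Row[domain-1] selects the matching
// column. In the 4-column AVX-512 tables, column 3 holds the 64-bit-element
// integer opcode and column 4 the 32-bit-element one, which is why the
// integer domain also checks Row[3].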
// Helper to attempt to widen/narrow blend masks.
static bool AdjustBlendMask(unsigned OldMask, unsigned OldWidth,
unsigned NewWidth, unsigned *pNewMask = nullptr) {
assert(((OldWidth % NewWidth) == 0 || (NewWidth % OldWidth) == 0) &&
"Illegal blend mask scale");
unsigned NewMask = 0;
if ((OldWidth % NewWidth) == 0) {
unsigned Scale = OldWidth / NewWidth;
unsigned SubMask = (1u << Scale) - 1;
for (unsigned i = 0; i != NewWidth; ++i) {
unsigned Sub = (OldMask >> (i * Scale)) & SubMask;
if (Sub == SubMask)
NewMask |= (1u << i);
else if (Sub != 0x0)
return false;
}
} else {
unsigned Scale = NewWidth / OldWidth;
unsigned SubMask = (1u << Scale) - 1;
for (unsigned i = 0; i != OldWidth; ++i) {
if (OldMask & (1 << i)) {
NewMask |= (SubMask << (i * Scale));
}
}
}
if (pNewMask)
*pNewMask = NewMask;
return true;
}
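// Worked example (illustrative): narrowing OldMask=0b1100 from OldWidth=4 to
// NewWidth=2 groups the mask into two 2-bit lanes (0b00 and 0b11) and yields
// NewMask=0b10, while 0b0110 fails because a lane is only partially selected.
// Widening 0b10 from width 2 to width 4 replicates each bit across its lane,
// giving 0b1100.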
uint16_t X86InstrInfo::getExecutionDomainCustom(const MachineInstr &MI) const {
unsigned Opcode = MI.getOpcode();
unsigned NumOperands = MI.getDesc().getNumOperands();
auto GetBlendDomains = [&](unsigned ImmWidth, bool Is256) {
uint16_t validDomains = 0;
if (MI.getOperand(NumOperands - 1).isImm()) {
unsigned Imm = MI.getOperand(NumOperands - 1).getImm();
if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4))
validDomains |= 0x2; // PackedSingle
if (AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2))
validDomains |= 0x4; // PackedDouble
if (!Is256 || Subtarget.hasAVX2())
validDomains |= 0x8; // PackedInt
}
return validDomains;
};
switch (Opcode) {
case X86::BLENDPDrmi:
case X86::BLENDPDrri:
case X86::VBLENDPDrmi:
case X86::VBLENDPDrri:
return GetBlendDomains(2, false);
case X86::VBLENDPDYrmi:
case X86::VBLENDPDYrri:
return GetBlendDomains(4, true);
case X86::BLENDPSrmi:
case X86::BLENDPSrri:
case X86::VBLENDPSrmi:
case X86::VBLENDPSrri:
case X86::VPBLENDDrmi:
case X86::VPBLENDDrri:
return GetBlendDomains(4, false);
case X86::VBLENDPSYrmi:
case X86::VBLENDPSYrri:
case X86::VPBLENDDYrmi:
case X86::VPBLENDDYrri:
return GetBlendDomains(8, true);
case X86::PBLENDWrmi:
case X86::PBLENDWrri:
case X86::VPBLENDWrmi:
case X86::VPBLENDWrri:
// Treat VPBLENDWY as a 128-bit vector, since the same mask repeats for the
// lo/hi halves.
case X86::VPBLENDWYrmi:
case X86::VPBLENDWYrri:
return GetBlendDomains(8, false);
case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
case X86::VPORDZ128rr: case X86::VPORDZ128rm:
case X86::VPORDZ256rr: case X86::VPORDZ256rm:
case X86::VPORQZ128rr: case X86::VPORQZ128rm:
case X86::VPORQZ256rr: case X86::VPORQZ256rm:
case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
case X86::VPXORQZ256rr: case X86::VPXORQZ256rm:
// If we don't have DQI, see if we can still switch from an EVEX integer
// instruction to a VEX floating-point instruction.
if (Subtarget.hasDQI())
return 0;
if (RI.getEncodingValue(MI.getOperand(0).getReg()) >= 16)
return 0;
if (RI.getEncodingValue(MI.getOperand(1).getReg()) >= 16)
return 0;
// Register forms will have 3 operands. Memory forms will have more.
if (NumOperands == 3 &&
RI.getEncodingValue(MI.getOperand(2).getReg()) >= 16)
return 0;
// All domains are valid.
return 0xe;
}
return 0;
}
bool X86InstrInfo::setExecutionDomainCustom(MachineInstr &MI,
unsigned Domain) const {
assert(Domain > 0 && Domain < 4 && "Invalid execution domain");
uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
assert(dom && "Not an SSE instruction");
unsigned Opcode = MI.getOpcode();
unsigned NumOperands = MI.getDesc().getNumOperands();
auto SetBlendDomain = [&](unsigned ImmWidth, bool Is256) {
if (MI.getOperand(NumOperands - 1).isImm()) {
unsigned Imm = MI.getOperand(NumOperands - 1).getImm() & 255;
Imm = (ImmWidth == 16 ? ((Imm << 8) | Imm) : Imm);
unsigned NewImm = Imm;
const uint16_t *table = lookup(Opcode, dom, ReplaceableCustomInstrs);
if (!table)
table = lookup(Opcode, dom, ReplaceableCustomAVX2Instrs);
if (Domain == 1) { // PackedSingle
AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
} else if (Domain == 2) { // PackedDouble
AdjustBlendMask(Imm, ImmWidth, Is256 ? 4 : 2, &NewImm);
} else if (Domain == 3) { // PackedInt
if (Subtarget.hasAVX2()) {
// If we are already VPBLENDW, use that; else use VPBLENDD.
if ((ImmWidth / (Is256 ? 2 : 1)) != 8) {
table = lookup(Opcode, dom, ReplaceableCustomAVX2Instrs);
AdjustBlendMask(Imm, ImmWidth, Is256 ? 8 : 4, &NewImm);
}
} else {
assert(!Is256 && "128-bit vector expected");
AdjustBlendMask(Imm, ImmWidth, 8, &NewImm);
}
}
assert(table && table[Domain - 1] && "Unknown domain op");
MI.setDesc(get(table[Domain - 1]));
MI.getOperand(NumOperands - 1).setImm(NewImm & 255);
}
return true;
};
switch (Opcode) {
case X86::BLENDPDrmi:
case X86::BLENDPDrri:
case X86::VBLENDPDrmi:
case X86::VBLENDPDrri:
return SetBlendDomain(2, false);
case X86::VBLENDPDYrmi:
case X86::VBLENDPDYrri:
return SetBlendDomain(4, true);
case X86::BLENDPSrmi:
case X86::BLENDPSrri:
case X86::VBLENDPSrmi:
case X86::VBLENDPSrri:
case X86::VPBLENDDrmi:
case X86::VPBLENDDrri:
return SetBlendDomain(4, false);
case X86::VBLENDPSYrmi:
case X86::VBLENDPSYrri:
case X86::VPBLENDDYrmi:
case X86::VPBLENDDYrri:
return SetBlendDomain(8, true);
case X86::PBLENDWrmi:
case X86::PBLENDWrri:
case X86::VPBLENDWrmi:
case X86::VPBLENDWrri:
return SetBlendDomain(8, false);
case X86::VPBLENDWYrmi:
case X86::VPBLENDWYrri:
return SetBlendDomain(16, true);
case X86::VPANDDZ128rr: case X86::VPANDDZ128rm:
case X86::VPANDDZ256rr: case X86::VPANDDZ256rm:
case X86::VPANDQZ128rr: case X86::VPANDQZ128rm:
case X86::VPANDQZ256rr: case X86::VPANDQZ256rm:
case X86::VPANDNDZ128rr: case X86::VPANDNDZ128rm:
case X86::VPANDNDZ256rr: case X86::VPANDNDZ256rm:
case X86::VPANDNQZ128rr: case X86::VPANDNQZ128rm:
case X86::VPANDNQZ256rr: case X86::VPANDNQZ256rm:
case X86::VPORDZ128rr: case X86::VPORDZ128rm:
case X86::VPORDZ256rr: case X86::VPORDZ256rm:
case X86::VPORQZ128rr: case X86::VPORQZ128rm:
case X86::VPORQZ256rr: case X86::VPORQZ256rm:
case X86::VPXORDZ128rr: case X86::VPXORDZ128rm:
case X86::VPXORDZ256rr: case X86::VPXORDZ256rm:
case X86::VPXORQZ128rr: case X86::VPXORQZ128rm:
case X86::VPXORQZ256rr: case X86::VPXORQZ256rm: {
// Without DQI, convert EVEX instructions to VEX instructions.
if (Subtarget.hasDQI())
return false;
const uint16_t *table = lookupAVX512(MI.getOpcode(), dom,
ReplaceableCustomAVX512LogicInstrs);
assert(table && "Instruction not found in table?");
// Don't change integer Q instructions to D instructions and
// use D instructions if we started with a PS instruction.
if (Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
Domain = 4;
MI.setDesc(get(table[Domain - 1]));
return true;
}
}
return false;
}
std::pair<uint16_t, uint16_t>
X86InstrInfo::getExecutionDomain(const MachineInstr &MI) const {
uint16_t domain = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
unsigned opcode = MI.getOpcode();
uint16_t validDomains = 0;
if (domain) {
// Attempt to match for custom instructions.
validDomains = getExecutionDomainCustom(MI);
if (validDomains)
return std::make_pair(domain, validDomains);
if (lookup(opcode, domain, ReplaceableInstrs)) {
validDomains = 0xe;
} else if (lookup(opcode, domain, ReplaceableInstrsAVX2)) {
validDomains = Subtarget.hasAVX2() ? 0xe : 0x6;
} else if (lookup(opcode, domain, ReplaceableInstrsAVX2InsertExtract)) {
// Insert/extract instructions should only affect the domain if AVX2
// is enabled.
if (!Subtarget.hasAVX2())
return std::make_pair(0, 0);
validDomains = 0xe;
} else if (lookupAVX512(opcode, domain, ReplaceableInstrsAVX512)) {
validDomains = 0xe;
} else if (Subtarget.hasDQI() && lookupAVX512(opcode, domain,
ReplaceableInstrsAVX512DQ)) {
validDomains = 0xe;
} else if (Subtarget.hasDQI()) {
if (const uint16_t *table = lookupAVX512(opcode, domain,
ReplaceableInstrsAVX512DQMasked)) {
if (domain == 1 || (domain == 3 && table[3] == opcode))
validDomains = 0xa;
else
validDomains = 0xc;
}
}
}
return std::make_pair(domain, validDomains);
}
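// Illustrative note: the second element is a bitmask of legal domains (bit 1 =
// PackedSingle, bit 2 = PackedDouble, bit 3 = PackedInt), so 0xe means all
// three are legal and 0x6 means only the FP domains. For example,
// X86::VANDPSYrr hits the AVX2 table above and gets 0xe with AVX2 available,
// 0x6 without it.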
void X86InstrInfo::setExecutionDomain(MachineInstr &MI, unsigned Domain) const {
assert(Domain>0 && Domain<4 && "Invalid execution domain");
uint16_t dom = (MI.getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
assert(dom && "Not an SSE instruction");
// Attempt to match for custom instructions.
if (setExecutionDomainCustom(MI, Domain))
return;
const uint16_t *table = lookup(MI.getOpcode(), dom, ReplaceableInstrs);
if (!table) { // try the AVX2 table
assert((Subtarget.hasAVX2() || Domain < 3) &&
"256-bit vector operations only available in AVX2");
table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2);
}
if (!table) { // try the AVX2 insert/extract table
assert(Subtarget.hasAVX2() &&
"256-bit insert/extract only available in AVX2");
table = lookup(MI.getOpcode(), dom, ReplaceableInstrsAVX2InsertExtract);
}
if (!table) { // try the AVX512 table
assert(Subtarget.hasAVX512() && "Requires AVX-512");
table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512);
// Don't change integer Q instructions to D instructions.
if (table && Domain == 3 && table[3] == MI.getOpcode())
Domain = 4;
}
if (!table) { // try the AVX512DQ table
assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQ);
// Don't change integer Q instructions to D instructions and
// use D instructions if we started with a PS instruction.
if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
Domain = 4;
}
if (!table) { // try the AVX512DQMasked table
assert((Subtarget.hasDQI() || Domain >= 3) && "Requires AVX-512DQ");
table = lookupAVX512(MI.getOpcode(), dom, ReplaceableInstrsAVX512DQMasked);
if (table && Domain == 3 && (dom == 1 || table[3] == MI.getOpcode()))
Domain = 4;
}
assert(table && "Cannot change domain");
MI.setDesc(get(table[Domain - 1]));
}
/// Set NopInst to the noop instruction to use for a noop.
void X86InstrInfo::getNoop(MCInst &NopInst) const {
NopInst.setOpcode(X86::NOOP);
}
bool X86InstrInfo::isHighLatencyDef(int opc) const {
switch (opc) {
default: return false;
case X86::DIVPDrm:
case X86::DIVPDrr:
case X86::DIVPSrm:
case X86::DIVPSrr:
case X86::DIVSDrm:
case X86::DIVSDrm_Int:
case X86::DIVSDrr:
case X86::DIVSDrr_Int:
case X86::DIVSSrm:
case X86::DIVSSrm_Int:
case X86::DIVSSrr:
case X86::DIVSSrr_Int:
case X86::SQRTPDm:
case X86::SQRTPDr:
case X86::SQRTPSm:
case X86::SQRTPSr:
case X86::SQRTSDm:
case X86::SQRTSDm_Int:
case X86::SQRTSDr:
case X86::SQRTSDr_Int:
case X86::SQRTSSm:
case X86::SQRTSSm_Int:
case X86::SQRTSSr:
case X86::SQRTSSr_Int:
// AVX instructions with high latency
case X86::VDIVPDrm:
case X86::VDIVPDrr:
case X86::VDIVPDYrm:
case X86::VDIVPDYrr:
case X86::VDIVPSrm:
case X86::VDIVPSrr:
case X86::VDIVPSYrm:
case X86::VDIVPSYrr:
case X86::VDIVSDrm:
case X86::VDIVSDrm_Int:
case X86::VDIVSDrr:
case X86::VDIVSDrr_Int:
case X86::VDIVSSrm:
case X86::VDIVSSrm_Int:
case X86::VDIVSSrr:
case X86::VDIVSSrr_Int:
case X86::VSQRTPDm:
case X86::VSQRTPDr:
case X86::VSQRTPDYm:
case X86::VSQRTPDYr:
case X86::VSQRTPSm:
case X86::VSQRTPSr:
case X86::VSQRTPSYm:
case X86::VSQRTPSYr:
case X86::VSQRTSDm:
case X86::VSQRTSDm_Int:
case X86::VSQRTSDr:
case X86::VSQRTSDr_Int:
case X86::VSQRTSSm:
case X86::VSQRTSSm_Int:
case X86::VSQRTSSr:
case X86::VSQRTSSr_Int:
// AVX512 instructions with high latency
case X86::VDIVPDZ128rm:
case X86::VDIVPDZ128rmb:
case X86::VDIVPDZ128rmbk:
case X86::VDIVPDZ128rmbkz:
case X86::VDIVPDZ128rmk:
case X86::VDIVPDZ128rmkz:
case X86::VDIVPDZ128rr:
case X86::VDIVPDZ128rrk:
case X86::VDIVPDZ128rrkz:
case X86::VDIVPDZ256rm:
case X86::VDIVPDZ256rmb:
case X86::VDIVPDZ256rmbk:
case X86::VDIVPDZ256rmbkz:
case X86::VDIVPDZ256rmk:
case X86::VDIVPDZ256rmkz:
case X86::VDIVPDZ256rr:
case X86::VDIVPDZ256rrk:
case X86::VDIVPDZ256rrkz:
case X86::VDIVPDZrrb:
case X86::VDIVPDZrrbk:
case X86::VDIVPDZrrbkz:
case X86::VDIVPDZrm:
case X86::VDIVPDZrmb:
case X86::VDIVPDZrmbk:
case X86::VDIVPDZrmbkz:
case X86::VDIVPDZrmk:
case X86::VDIVPDZrmkz:
case X86::VDIVPDZrr:
case X86::VDIVPDZrrk:
case X86::VDIVPDZrrkz:
case X86::VDIVPSZ128rm:
case X86::VDIVPSZ128rmb:
case X86::VDIVPSZ128rmbk:
case X86::VDIVPSZ128rmbkz:
case X86::VDIVPSZ128rmk:
case X86::VDIVPSZ128rmkz:
case X86::VDIVPSZ128rr:
case X86::VDIVPSZ128rrk:
case X86::VDIVPSZ128rrkz:
case X86::VDIVPSZ256rm:
case X86::VDIVPSZ256rmb:
case X86::VDIVPSZ256rmbk:
case X86::VDIVPSZ256rmbkz:
case X86::VDIVPSZ256rmk:
case X86::VDIVPSZ256rmkz:
case X86::VDIVPSZ256rr:
case X86::VDIVPSZ256rrk:
case X86::VDIVPSZ256rrkz:
case X86::VDIVPSZrrb:
case X86::VDIVPSZrrbk:
case X86::VDIVPSZrrbkz:
case X86::VDIVPSZrm:
case X86::VDIVPSZrmb:
case X86::VDIVPSZrmbk:
case X86::VDIVPSZrmbkz:
case X86::VDIVPSZrmk:
case X86::VDIVPSZrmkz:
case X86::VDIVPSZrr:
case X86::VDIVPSZrrk:
case X86::VDIVPSZrrkz:
case X86::VDIVSDZrm:
case X86::VDIVSDZrr:
case X86::VDIVSDZrm_Int:
case X86::VDIVSDZrm_Intk:
case X86::VDIVSDZrm_Intkz:
case X86::VDIVSDZrr_Int:
case X86::VDIVSDZrr_Intk:
case X86::VDIVSDZrr_Intkz:
case X86::VDIVSDZrrb_Int:
case X86::VDIVSDZrrb_Intk:
case X86::VDIVSDZrrb_Intkz:
case X86::VDIVSSZrm:
case X86::VDIVSSZrr:
case X86::VDIVSSZrm_Int:
case X86::VDIVSSZrm_Intk:
case X86::VDIVSSZrm_Intkz:
case X86::VDIVSSZrr_Int:
case X86::VDIVSSZrr_Intk:
case X86::VDIVSSZrr_Intkz:
case X86::VDIVSSZrrb_Int:
case X86::VDIVSSZrrb_Intk:
case X86::VDIVSSZrrb_Intkz:
case X86::VSQRTPDZ128m:
case X86::VSQRTPDZ128mb:
case X86::VSQRTPDZ128mbk:
case X86::VSQRTPDZ128mbkz:
case X86::VSQRTPDZ128mk:
case X86::VSQRTPDZ128mkz:
case X86::VSQRTPDZ128r:
case X86::VSQRTPDZ128rk:
case X86::VSQRTPDZ128rkz:
case X86::VSQRTPDZ256m:
case X86::VSQRTPDZ256mb:
case X86::VSQRTPDZ256mbk:
case X86::VSQRTPDZ256mbkz:
case X86::VSQRTPDZ256mk:
case X86::VSQRTPDZ256mkz:
case X86::VSQRTPDZ256r:
case X86::VSQRTPDZ256rk:
case X86::VSQRTPDZ256rkz:
case X86::VSQRTPDZm:
case X86::VSQRTPDZmb:
case X86::VSQRTPDZmbk:
case X86::VSQRTPDZmbkz:
case X86::VSQRTPDZmk:
case X86::VSQRTPDZmkz:
case X86::VSQRTPDZr:
case X86::VSQRTPDZrb:
case X86::VSQRTPDZrbk:
case X86::VSQRTPDZrbkz:
case X86::VSQRTPDZrk:
case X86::VSQRTPDZrkz:
case X86::VSQRTPSZ128m:
case X86::VSQRTPSZ128mb:
case X86::VSQRTPSZ128mbk:
case X86::VSQRTPSZ128mbkz:
case X86::VSQRTPSZ128mk:
case X86::VSQRTPSZ128mkz:
case X86::VSQRTPSZ128r:
case X86::VSQRTPSZ128rk:
case X86::VSQRTPSZ128rkz:
case X86::VSQRTPSZ256m:
case X86::VSQRTPSZ256mb:
case X86::VSQRTPSZ256mbk:
case X86::VSQRTPSZ256mbkz:
case X86::VSQRTPSZ256mk:
case X86::VSQRTPSZ256mkz:
case X86::VSQRTPSZ256r:
case X86::VSQRTPSZ256rk:
case X86::VSQRTPSZ256rkz:
case X86::VSQRTPSZm:
case X86::VSQRTPSZmb:
case X86::VSQRTPSZmbk:
case X86::VSQRTPSZmbkz:
case X86::VSQRTPSZmk:
case X86::VSQRTPSZmkz:
case X86::VSQRTPSZr:
case X86::VSQRTPSZrb:
case X86::VSQRTPSZrbk:
case X86::VSQRTPSZrbkz:
case X86::VSQRTPSZrk:
case X86::VSQRTPSZrkz:
case X86::VSQRTSDZm:
case X86::VSQRTSDZm_Int:
case X86::VSQRTSDZm_Intk:
case X86::VSQRTSDZm_Intkz:
case X86::VSQRTSDZr:
case X86::VSQRTSDZr_Int:
case X86::VSQRTSDZr_Intk:
case X86::VSQRTSDZr_Intkz:
case X86::VSQRTSDZrb_Int:
case X86::VSQRTSDZrb_Intk:
case X86::VSQRTSDZrb_Intkz:
case X86::VSQRTSSZm:
case X86::VSQRTSSZm_Int:
case X86::VSQRTSSZm_Intk:
case X86::VSQRTSSZm_Intkz:
case X86::VSQRTSSZr:
case X86::VSQRTSSZr_Int:
case X86::VSQRTSSZr_Intk:
case X86::VSQRTSSZr_Intkz:
case X86::VSQRTSSZrb_Int:
case X86::VSQRTSSZrb_Intk:
case X86::VSQRTSSZrb_Intkz:
case X86::VGATHERDPDYrm:
case X86::VGATHERDPDZ128rm:
case X86::VGATHERDPDZ256rm:
case X86::VGATHERDPDZrm:
case X86::VGATHERDPDrm:
case X86::VGATHERDPSYrm:
case X86::VGATHERDPSZ128rm:
case X86::VGATHERDPSZ256rm:
case X86::VGATHERDPSZrm:
case X86::VGATHERDPSrm:
case X86::VGATHERPF0DPDm:
case X86::VGATHERPF0DPSm:
case X86::VGATHERPF0QPDm:
case X86::VGATHERPF0QPSm:
case X86::VGATHERPF1DPDm:
case X86::VGATHERPF1DPSm:
case X86::VGATHERPF1QPDm:
case X86::VGATHERPF1QPSm:
case X86::VGATHERQPDYrm:
case X86::VGATHERQPDZ128rm:
case X86::VGATHERQPDZ256rm:
case X86::VGATHERQPDZrm:
case X86::VGATHERQPDrm:
case X86::VGATHERQPSYrm:
case X86::VGATHERQPSZ128rm:
case X86::VGATHERQPSZ256rm:
case X86::VGATHERQPSZrm:
case X86::VGATHERQPSrm:
case X86::VPGATHERDDYrm:
case X86::VPGATHERDDZ128rm:
case X86::VPGATHERDDZ256rm:
case X86::VPGATHERDDZrm:
case X86::VPGATHERDDrm:
case X86::VPGATHERDQYrm:
case X86::VPGATHERDQZ128rm:
case X86::VPGATHERDQZ256rm:
case X86::VPGATHERDQZrm:
case X86::VPGATHERDQrm:
case X86::VPGATHERQDYrm:
case X86::VPGATHERQDZ128rm:
case X86::VPGATHERQDZ256rm:
case X86::VPGATHERQDZrm:
case X86::VPGATHERQDrm:
case X86::VPGATHERQQYrm:
case X86::VPGATHERQQZ128rm:
case X86::VPGATHERQQZ256rm:
case X86::VPGATHERQQZrm:
case X86::VPGATHERQQrm:
case X86::VSCATTERDPDZ128mr:
case X86::VSCATTERDPDZ256mr:
case X86::VSCATTERDPDZmr:
case X86::VSCATTERDPSZ128mr:
case X86::VSCATTERDPSZ256mr:
case X86::VSCATTERDPSZmr:
case X86::VSCATTERPF0DPDm:
case X86::VSCATTERPF0DPSm:
case X86::VSCATTERPF0QPDm:
case X86::VSCATTERPF0QPSm:
case X86::VSCATTERPF1DPDm:
case X86::VSCATTERPF1DPSm:
case X86::VSCATTERPF1QPDm:
case X86::VSCATTERPF1QPSm:
case X86::VSCATTERQPDZ128mr:
case X86::VSCATTERQPDZ256mr:
case X86::VSCATTERQPDZmr:
case X86::VSCATTERQPSZ128mr:
case X86::VSCATTERQPSZ256mr:
case X86::VSCATTERQPSZmr:
case X86::VPSCATTERDDZ128mr:
case X86::VPSCATTERDDZ256mr:
case X86::VPSCATTERDDZmr:
case X86::VPSCATTERDQZ128mr:
case X86::VPSCATTERDQZ256mr:
case X86::VPSCATTERDQZmr:
case X86::VPSCATTERQDZ128mr:
case X86::VPSCATTERQDZ256mr:
case X86::VPSCATTERQDZmr:
case X86::VPSCATTERQQZ128mr:
case X86::VPSCATTERQQZ256mr:
case X86::VPSCATTERQQZmr:
return true;
}
}
bool X86InstrInfo::hasHighOperandLatency(const TargetSchedModel &SchedModel,
const MachineRegisterInfo *MRI,
const MachineInstr &DefMI,
unsigned DefIdx,
const MachineInstr &UseMI,
unsigned UseIdx) const {
return isHighLatencyDef(DefMI.getOpcode());
}
bool X86InstrInfo::hasReassociableOperands(const MachineInstr &Inst,
const MachineBasicBlock *MBB) const {
assert((Inst.getNumOperands() == 3 || Inst.getNumOperands() == 4) &&
"Reassociation needs binary operators");
// Integer binary math/logic instructions have a third source operand:
// the EFLAGS register. That operand must be both defined here and never
// used; i.e., it must be dead. If the EFLAGS operand is live, then we
// cannot change anything because rearranging the operands could affect other
// instructions that depend on the exact status flags (zero, sign, etc.)
// that are set by using these particular operands with this operation.
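// Illustrative sketch: in t1 = ADD32rr a, b; t2 = ADD32rr t1, c, both ADDs
// also define EFLAGS; reassociating the chain changes the values each ADD
// sees, so a live flag consumer could observe different flags afterwards.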
if (Inst.getNumOperands() == 4) {
assert(Inst.getOperand(3).isReg() &&
Inst.getOperand(3).getReg() == X86::EFLAGS &&
"Unexpected operand in reassociable instruction");
if (!Inst.getOperand(3).isDead())
return false;
}
return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
}
// TODO: There are many more machine instruction opcodes to match:
// 1. Other data types (integer, vectors)
// 2. Other math / logic operations (xor, or)
// 3. Other forms of the same operation (intrinsics and other variants)
bool X86InstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst) const {
switch (Inst.getOpcode()) {
case X86::AND8rr:
case X86::AND16rr:
case X86::AND32rr:
case X86::AND64rr:
case X86::OR8rr:
case X86::OR16rr:
case X86::OR32rr:
case X86::OR64rr:
case X86::XOR8rr:
case X86::XOR16rr:
case X86::XOR32rr:
case X86::XOR64rr:
case X86::IMUL16rr:
case X86::IMUL32rr:
case X86::IMUL64rr:
case X86::PANDrr:
case X86::PORrr:
case X86::PXORrr:
case X86::ANDPDrr:
case X86::ANDPSrr:
case X86::ORPDrr:
case X86::ORPSrr:
case X86::XORPDrr:
case X86::XORPSrr:
case X86::PADDBrr:
case X86::PADDWrr:
case X86::PADDDrr:
case X86::PADDQrr:
case X86::VPANDrr:
case X86::VPANDYrr:
case X86::VPANDDZ128rr:
case X86::VPANDDZ256rr:
case X86::VPANDDZrr:
case X86::VPANDQZ128rr:
case X86::VPANDQZ256rr:
case X86::VPANDQZrr:
case X86::VPORrr:
case X86::VPORYrr:
case X86::VPORDZ128rr:
case X86::VPORDZ256rr:
case X86::VPORDZrr:
case X86::VPORQZ128rr:
case X86::VPORQZ256rr:
case X86::VPORQZrr:
case X86::VPXORrr:
case X86::VPXORYrr:
case X86::VPXORDZ128rr:
case X86::VPXORDZ256rr:
case X86::VPXORDZrr:
case X86::VPXORQZ128rr:
case X86::VPXORQZ256rr:
case X86::VPXORQZrr:
case X86::VANDPDrr:
case X86::VANDPSrr:
case X86::VANDPDYrr:
case X86::VANDPSYrr:
case X86::VANDPDZ128rr:
case X86::VANDPSZ128rr:
case X86::VANDPDZ256rr:
case X86::VANDPSZ256rr:
case X86::VANDPDZrr:
case X86::VANDPSZrr:
case X86::VORPDrr:
case X86::VORPSrr:
case X86::VORPDYrr:
case X86::VORPSYrr:
case X86::VORPDZ128rr:
case X86::VORPSZ128rr:
case X86::VORPDZ256rr:
case X86::VORPSZ256rr:
case X86::VORPDZrr:
case X86::VORPSZrr:
case X86::VXORPDrr:
case X86::VXORPSrr:
case X86::VXORPDYrr:
case X86::VXORPSYrr:
case X86::VXORPDZ128rr:
case X86::VXORPSZ128rr:
case X86::VXORPDZ256rr:
case X86::VXORPSZ256rr:
case X86::VXORPDZrr:
case X86::VXORPSZrr:
case X86::KADDBrr:
case X86::KADDWrr:
case X86::KADDDrr:
case X86::KADDQrr:
case X86::KANDBrr:
case X86::KANDWrr:
case X86::KANDDrr:
case X86::KANDQrr:
case X86::KORBrr:
case X86::KORWrr:
case X86::KORDrr:
case X86::KORQrr:
case X86::KXORBrr:
case X86::KXORWrr:
case X86::KXORDrr:
case X86::KXORQrr:
case X86::VPADDBrr:
case X86::VPADDWrr:
case X86::VPADDDrr:
case X86::VPADDQrr:
case X86::VPADDBYrr:
case X86::VPADDWYrr:
case X86::VPADDDYrr:
case X86::VPADDQYrr:
case X86::VPADDBZ128rr:
case X86::VPADDWZ128rr:
case X86::VPADDDZ128rr:
case X86::VPADDQZ128rr:
case X86::VPADDBZ256rr:
case X86::VPADDWZ256rr:
case X86::VPADDDZ256rr:
case X86::VPADDQZ256rr:
case X86::VPADDBZrr:
case X86::VPADDWZrr:
case X86::VPADDDZrr:
case X86::VPADDQZrr:
case X86::VPMULLWrr:
case X86::VPMULLWYrr:
case X86::VPMULLWZ128rr:
case X86::VPMULLWZ256rr:
case X86::VPMULLWZrr:
case X86::VPMULLDrr:
case X86::VPMULLDYrr:
case X86::VPMULLDZ128rr:
case X86::VPMULLDZ256rr:
case X86::VPMULLDZrr:
case X86::VPMULLQZ128rr:
case X86::VPMULLQZ256rr:
case X86::VPMULLQZrr:
// Normal min/max instructions are not commutative because of NaN and signed
// zero semantics, but these are. Thus, there's no need to check for global
// relaxed math; the instructions themselves have the properties we need.
case X86::MAXCPDrr:
case X86::MAXCPSrr:
case X86::MAXCSDrr:
case X86::MAXCSSrr:
case X86::MINCPDrr:
case X86::MINCPSrr:
case X86::MINCSDrr:
case X86::MINCSSrr:
case X86::VMAXCPDrr:
case X86::VMAXCPSrr:
case X86::VMAXCPDYrr:
case X86::VMAXCPSYrr:
case X86::VMAXCPDZ128rr:
case X86::VMAXCPSZ128rr:
case X86::VMAXCPDZ256rr:
case X86::VMAXCPSZ256rr:
case X86::VMAXCPDZrr:
case X86::VMAXCPSZrr:
case X86::VMAXCSDrr:
case X86::VMAXCSSrr:
case X86::VMAXCSDZrr:
case X86::VMAXCSSZrr:
case X86::VMINCPDrr:
case X86::VMINCPSrr:
case X86::VMINCPDYrr:
case X86::VMINCPSYrr:
case X86::VMINCPDZ128rr:
case X86::VMINCPSZ128rr:
case X86::VMINCPDZ256rr:
case X86::VMINCPSZ256rr:
case X86::VMINCPDZrr:
case X86::VMINCPSZrr:
case X86::VMINCSDrr:
case X86::VMINCSSrr:
case X86::VMINCSDZrr:
case X86::VMINCSSZrr:
return true;
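// The FP add/mul cases below are associative only under relaxed FP semantics:
// in IEEE-754 arithmetic, e.g., (1e20 + -1e20) + 1.0 == 1.0 while
// 1e20 + (-1e20 + 1.0) == 0.0, which is why they are gated on UnsafeFPMath.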
case X86::ADDPDrr:
case X86::ADDPSrr:
case X86::ADDSDrr:
case X86::ADDSSrr:
case X86::MULPDrr:
case X86::MULPSrr:
case X86::MULSDrr:
case X86::MULSSrr:
case X86::VADDPDrr:
case X86::VADDPSrr:
case X86::VADDPDYrr:
case X86::VADDPSYrr:
case X86::VADDPDZ128rr:
case X86::VADDPSZ128rr:
case X86::VADDPDZ256rr:
case X86::VADDPSZ256rr:
case X86::VADDPDZrr:
case X86::VADDPSZrr:
case X86::VADDSDrr:
case X86::VADDSSrr:
case X86::VADDSDZrr:
case X86::VADDSSZrr:
case X86::VMULPDrr:
case X86::VMULPSrr:
case X86::VMULPDYrr:
case X86::VMULPSYrr:
case X86::VMULPDZ128rr:
case X86::VMULPSZ128rr:
case X86::VMULPDZ256rr:
case X86::VMULPSZ256rr:
case X86::VMULPDZrr:
case X86::VMULPSZrr:
case X86::VMULSDrr:
case X86::VMULSSrr:
case X86::VMULSDZrr:
case X86::VMULSSZrr:
return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
default:
return false;
}
}
/// This is an architecture-specific helper function of reassociateOps.
/// Set special operand attributes for new instructions after reassociation.
void X86InstrInfo::setSpecialOperandAttr(MachineInstr &OldMI1,
MachineInstr &OldMI2,
MachineInstr &NewMI1,
MachineInstr &NewMI2) const {
// Integer instructions define an implicit EFLAGS source register operand as
// the third source (fourth total) operand.
if (OldMI1.getNumOperands() != 4 || OldMI2.getNumOperands() != 4)
return;
assert(NewMI1.getNumOperands() == 4 && NewMI2.getNumOperands() == 4 &&
"Unexpected instruction type for reassociation");
MachineOperand &OldOp1 = OldMI1.getOperand(3);
MachineOperand &OldOp2 = OldMI2.getOperand(3);
MachineOperand &NewOp1 = NewMI1.getOperand(3);
MachineOperand &NewOp2 = NewMI2.getOperand(3);
assert(OldOp1.isReg() && OldOp1.getReg() == X86::EFLAGS && OldOp1.isDead() &&
"Must have dead EFLAGS operand in reassociable instruction");
assert(OldOp2.isReg() && OldOp2.getReg() == X86::EFLAGS && OldOp2.isDead() &&
"Must have dead EFLAGS operand in reassociable instruction");
(void)OldOp1;
(void)OldOp2;
assert(NewOp1.isReg() && NewOp1.getReg() == X86::EFLAGS &&
"Unexpected operand in reassociable instruction");
assert(NewOp2.isReg() && NewOp2.getReg() == X86::EFLAGS &&
"Unexpected operand in reassociable instruction");
// Mark the new EFLAGS operands as dead to help subsequent iterations
// of this pass or other passes. The EFLAGS operands must be dead in these new
// instructions because the EFLAGS operands in the original instructions must
// be dead in order for reassociation to occur.
NewOp1.setIsDead();
NewOp2.setIsDead();
}
std::pair<unsigned, unsigned>
X86InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
return std::make_pair(TF, 0u);
}
ArrayRef<std::pair<unsigned, const char *>>
X86InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
using namespace X86II;
static const std::pair<unsigned, const char *> TargetFlags[] = {
{MO_GOT_ABSOLUTE_ADDRESS, "x86-got-absolute-address"},
{MO_PIC_BASE_OFFSET, "x86-pic-base-offset"},
{MO_GOT, "x86-got"},
{MO_GOTOFF, "x86-gotoff"},
{MO_GOTPCREL, "x86-gotpcrel"},
{MO_PLT, "x86-plt"},
{MO_TLSGD, "x86-tlsgd"},
{MO_TLSLD, "x86-tlsld"},
{MO_TLSLDM, "x86-tlsldm"},
{MO_GOTTPOFF, "x86-gottpoff"},
{MO_INDNTPOFF, "x86-indntpoff"},
{MO_TPOFF, "x86-tpoff"},
{MO_DTPOFF, "x86-dtpoff"},
{MO_NTPOFF, "x86-ntpoff"},
{MO_GOTNTPOFF, "x86-gotntpoff"},
{MO_DLLIMPORT, "x86-dllimport"},
{MO_DARWIN_NONLAZY, "x86-darwin-nonlazy"},
{MO_DARWIN_NONLAZY_PIC_BASE, "x86-darwin-nonlazy-pic-base"},
{MO_TLVP, "x86-tlvp"},
{MO_TLVP_PIC_BASE, "x86-tlvp-pic-base"},
{MO_SECREL, "x86-secrel"}};
return makeArrayRef(TargetFlags);
}
namespace {
/// Create Global Base Reg pass. This initializes the PIC
/// global base register for x86-32.
struct CGBR : public MachineFunctionPass {
static char ID;
CGBR() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget());
const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
// Don't do anything in the 64-bit small and kernel code models. They use
// RIP-relative addressing for everything.
if (STI.is64Bit() && (TM->getCodeModel() == CodeModel::Small ||
TM->getCodeModel() == CodeModel::Kernel))
return false;
// Only emit a global base reg in PIC mode.
if (!TM->isPositionIndependent())
return false;
X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
// If we didn't need a GlobalBaseReg, don't insert code.
if (GlobalBaseReg == 0)
return false;
// Insert the code that sets GlobalBaseReg into the first MBB of the function.
MachineBasicBlock &FirstMBB = MF.front();
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
MachineRegisterInfo &RegInfo = MF.getRegInfo();
const X86InstrInfo *TII = STI.getInstrInfo();
unsigned PC;
if (STI.isPICStyleGOT())
PC = RegInfo.createVirtualRegister(&X86::GR32RegClass);
else
PC = GlobalBaseReg;
if (STI.is64Bit()) {
if (TM->getCodeModel() == CodeModel::Medium) {
// In the medium code model, use a RIP-relative LEA to materialize the
// GOT.
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::LEA64r), PC)
.addReg(X86::RIP)
.addImm(0)
.addReg(0)
.addExternalSymbol("_GLOBAL_OFFSET_TABLE_")
.addReg(0);
} else if (TM->getCodeModel() == CodeModel::Large) {
// Loading the GOT in the large code model requires math with labels,
// so we use a pseudo instruction and expand it during MC emission.
unsigned Scratch = RegInfo.createVirtualRegister(&X86::GR64RegClass);
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVGOT64r), PC)
.addReg(Scratch, RegState::Undef | RegState::Define)
.addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
} else {
llvm_unreachable("unexpected code model");
}
} else {
// The operand of MovePCtoStack is completely ignored by the asm printer.
// It's only used in JIT code emission as a displacement to the PC.
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
// If we're using vanilla 'GOT' PIC style, we should use relative
// addressing not to pc, but to _GLOBAL_OFFSET_TABLE_ external.
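// In that case the full emitted sequence is typically (illustrative sketch;
// exact label names vary):
//   calll .L0$pb
// .L0$pb:
//   popl %reg
//   addl $_GLOBAL_OFFSET_TABLE_+(.-.L0$pb), %reg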
if (STI.isPICStyleGOT()) {
// Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel],
// %some_register
BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
.addReg(PC)
.addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
X86II::MO_GOT_ABSOLUTE_ADDRESS);
}
}
return true;
}
StringRef getPassName() const override {
return "X86 PIC Global Base Reg Initialization";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
}
char CGBR::ID = 0;
FunctionPass*
llvm::createX86GlobalBaseRegPass() { return new CGBR(); }
namespace {
struct LDTLSCleanup : public MachineFunctionPass {
static char ID;
LDTLSCleanup() : MachineFunctionPass(ID) {}
bool runOnMachineFunction(MachineFunction &MF) override {
if (skipFunction(MF.getFunction()))
return false;
X86MachineFunctionInfo *MFI = MF.getInfo<X86MachineFunctionInfo>();
if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there aren't at least two.
return false;
}
MachineDominatorTree *DT = &getAnalysis<MachineDominatorTree>();
return VisitNode(DT->getRootNode(), 0);
}
// Visit the dominator subtree rooted at Node in pre-order.
// If TLSBaseAddrReg is nonzero, then use that register to replace any
// TLS_base_addr instructions. Otherwise, create the register
// when the first such instruction is seen, and then use it
// as we encounter more instructions.
bool VisitNode(MachineDomTreeNode *Node, unsigned TLSBaseAddrReg) {
MachineBasicBlock *BB = Node->getBlock();
bool Changed = false;
// Traverse the current block.
for (MachineBasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;
++I) {
switch (I->getOpcode()) {
case X86::TLS_base_addr32:
case X86::TLS_base_addr64:
if (TLSBaseAddrReg)
I = ReplaceTLSBaseAddrCall(*I, TLSBaseAddrReg);
else
I = SetRegister(*I, &TLSBaseAddrReg);
Changed = true;
break;
default:
break;
}
}
// Visit the children of this block in the dominator tree.
for (MachineDomTreeNode::iterator I = Node->begin(), E = Node->end();
I != E; ++I) {
Changed |= VisitNode(*I, TLSBaseAddrReg);
}
return Changed;
}
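// Illustrative example: in a function with two TLS_base_addr64 calls where
// the first dominates the second, the pre-order walk keeps the first call,
// captures its result in a virtual register via SetRegister, and rewrites the
// second call into a plain COPY from that register.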
// Replace the TLS_base_addr instruction I with a copy from
// TLSBaseAddrReg, returning the new instruction.
MachineInstr *ReplaceTLSBaseAddrCall(MachineInstr &I,
unsigned TLSBaseAddrReg) {
MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
// Insert a Copy from TLSBaseAddrReg to RAX/EAX.
MachineInstr *Copy =
BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII->get(TargetOpcode::COPY), is64Bit ? X86::RAX : X86::EAX)
.addReg(TLSBaseAddrReg);
// Erase the TLS_base_addr instruction.
I.eraseFromParent();
return Copy;
}
// Create a virtual register in *TLSBaseAddrReg, and populate it by
// inserting a copy instruction after I. Returns the new instruction.
MachineInstr *SetRegister(MachineInstr &I, unsigned *TLSBaseAddrReg) {
MachineFunction *MF = I.getParent()->getParent();
const X86Subtarget &STI = MF->getSubtarget<X86Subtarget>();
const bool is64Bit = STI.is64Bit();
const X86InstrInfo *TII = STI.getInstrInfo();
// Create a virtual register for the TLS base address.
MachineRegisterInfo &RegInfo = MF->getRegInfo();
*TLSBaseAddrReg = RegInfo.createVirtualRegister(is64Bit
? &X86::GR64RegClass
: &X86::GR32RegClass);
// Insert a copy from RAX/EAX to TLSBaseAddrReg.
MachineInstr *Next = I.getNextNode();
MachineInstr *Copy =
BuildMI(*I.getParent(), Next, I.getDebugLoc(),
TII->get(TargetOpcode::COPY), *TLSBaseAddrReg)
.addReg(is64Bit ? X86::RAX : X86::EAX);
return Copy;
}
StringRef getPassName() const override {
return "Local Dynamic TLS Access Clean-up";
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);
}
};
}
char LDTLSCleanup::ID = 0;
FunctionPass*
llvm::createCleanupLocalDynamicTLSPass() { return new LDTLSCleanup(); }
/// Constants defining how certain sequences should be outlined.
///
/// \p MachineOutlinerDefault implies that the function is called with a call
/// instruction, and a return must be emitted for the outlined function frame.
///
/// That is,
///
/// I1 OUTLINED_FUNCTION:
/// I2 --> call OUTLINED_FUNCTION I1
/// I3 I2
/// I3
/// ret
///
/// * Call construction overhead: 1 (call instruction)
/// * Frame construction overhead: 1 (return instruction)
///
/// \p MachineOutlinerTailCall implies that the function is being tail called.
/// A jump is emitted instead of a call, and the return is already present in
/// the outlined sequence. That is,
///
/// I1 OUTLINED_FUNCTION:
/// I2 --> jmp OUTLINED_FUNCTION I1
/// ret I2
/// ret
///
/// * Call construction overhead: 1 (jump instruction)
/// * Frame construction overhead: 0 (don't need to return)
///
enum MachineOutlinerClass {
MachineOutlinerDefault,
MachineOutlinerTailCall
};
outliner::OutlinedFunction X86InstrInfo::getOutliningCandidateInfo(
std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
unsigned SequenceSize =
std::accumulate(RepeatedSequenceLocs[0].front(),
std::next(RepeatedSequenceLocs[0].back()), 0,
[](unsigned Sum, const MachineInstr &MI) {
// FIXME: x86 doesn't implement getInstSizeInBytes, so
// we can't tell the cost. Just assume each instruction
// is one byte.
if (MI.isDebugInstr() || MI.isKill())
return Sum;
return Sum + 1;
});
// FIXME: Use real size in bytes for call and ret instructions.
if (RepeatedSequenceLocs[0].back()->isTerminator()) {
for (outliner::Candidate &C : RepeatedSequenceLocs)
C.setCallInfo(MachineOutlinerTailCall, 1);
return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
0, // Number of bytes to emit frame.
MachineOutlinerTailCall // Type of frame.
);
}
for (outliner::Candidate &C : RepeatedSequenceLocs)
C.setCallInfo(MachineOutlinerDefault, 1);
return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize, 1,
MachineOutlinerDefault);
}
bool X86InstrInfo::isFunctionSafeToOutlineFrom(MachineFunction &MF,
bool OutlineFromLinkOnceODRs) const {
const Function &F = MF.getFunction();
// Does the function use a red zone? If it does, then we can't risk messing
// with the stack.
if (!F.hasFnAttribute(Attribute::NoRedZone)) {
// It could have a red zone. If it does, then we don't want to touch it.
const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
if (!X86FI || X86FI->getUsesRedZone())
return false;
}
// If we *don't* want to outline from things that could potentially be deduped,
// then return false.
if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
return false;
// This function is viable for outlining, so return true.
return true;
}
outliner::InstrType
X86InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT, unsigned Flags) const {
MachineInstr &MI = *MIT;
// Don't allow debug values to impact outlining type.
if (MI.isDebugInstr() || MI.isIndirectDebugValue())
return outliner::InstrType::Invisible;
// At this point, KILL instructions don't really tell us much so we can go
// ahead and skip over them.
if (MI.isKill())
return outliner::InstrType::Invisible;
// Is this a tail call? If yes, we can outline as a tail call.
if (isTailCall(MI))
return outliner::InstrType::Legal;
// Is this the terminator of a basic block?
if (MI.isTerminator() || MI.isReturn()) {
// Does its parent have any successors in its MachineFunction?
if (MI.getParent()->succ_empty())
return outliner::InstrType::Legal;
// It does, so we can't tail call it.
return outliner::InstrType::Illegal;
}
// Don't outline anything that modifies or reads from the stack pointer.
//
// FIXME: There are instructions which are being manually built without
// explicit uses/defs so we also have to check the MCInstrDesc. We should be
// able to remove the extra checks once those are fixed up. For example,
// sometimes we might get something like %rax = POP64r 1. This won't be
// caught by modifiesRegister or readsRegister even though the instruction
// really ought to be formed so that modifiesRegister/readsRegister would
// catch it.
if (MI.modifiesRegister(X86::RSP, &RI) || MI.readsRegister(X86::RSP, &RI) ||
MI.getDesc().hasImplicitUseOfPhysReg(X86::RSP) ||
MI.getDesc().hasImplicitDefOfPhysReg(X86::RSP))
return outliner::InstrType::Illegal;
// Outlined calls change the instruction pointer, so don't read from it.
if (MI.readsRegister(X86::RIP, &RI) ||
MI.getDesc().hasImplicitUseOfPhysReg(X86::RIP) ||
MI.getDesc().hasImplicitDefOfPhysReg(X86::RIP))
return outliner::InstrType::Illegal;
// Position-marking instructions (labels, CFI directives) can't safely be
// outlined.
if (MI.isPosition())
return outliner::InstrType::Illegal;
// Make sure none of the operands of this instruction do anything tricky.
for (const MachineOperand &MOP : MI.operands())
if (MOP.isCPI() || MOP.isJTI() || MOP.isCFIIndex() || MOP.isFI() ||
MOP.isTargetIndex())
return outliner::InstrType::Illegal;
return outliner::InstrType::Legal;
}
void X86InstrInfo::buildOutlinedFrame(MachineBasicBlock &MBB,
MachineFunction &MF,
const outliner::OutlinedFunction &OF)
const {
// If we're a tail call, we already have a return, so don't do anything.
if (OF.FrameConstructionID == MachineOutlinerTailCall)
return;
// We're a normal call, so our sequence doesn't have a return instruction.
// Add it in.
MachineInstr *retq = BuildMI(MF, DebugLoc(), get(X86::RETQ));
MBB.insert(MBB.end(), retq);
}
MachineBasicBlock::iterator
X86InstrInfo::insertOutlinedCall(Module &M, MachineBasicBlock &MBB,
MachineBasicBlock::iterator &It,
MachineFunction &MF,
const outliner::Candidate &C) const {
// Is it a tail call?
if (C.CallConstructionID == MachineOutlinerTailCall) {
// Yes, just insert a JMP.
It = MBB.insert(It,
BuildMI(MF, DebugLoc(), get(X86::TAILJMPd64))
.addGlobalAddress(M.getNamedValue(MF.getName())));
} else {
// No, insert a call.
It = MBB.insert(It,
BuildMI(MF, DebugLoc(), get(X86::CALL64pcrel32))
.addGlobalAddress(M.getNamedValue(MF.getName())));
}
return It;
}
Index: projects/clang700-import/contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/Transforms/IPO/FunctionImport.cpp (revision 340125)
@@ -1,1133 +1,1132 @@
//===- FunctionImport.cpp - ThinLTO Summary-based Function Import ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements Function import based on summaries.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO/FunctionImport.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/Linker/IRMover.h"
#include "llvm/Object/ModuleSymbolTable.h"
#include "llvm/Object/SymbolicFile.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO/Internalize.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/FunctionImportUtils.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <memory>
#include <set>
#include <string>
#include <system_error>
#include <tuple>
#include <utility>
using namespace llvm;
#define DEBUG_TYPE "function-import"
STATISTIC(NumImportedFunctions, "Number of functions imported");
STATISTIC(NumImportedGlobalVars, "Number of global variables imported");
STATISTIC(NumImportedModules, "Number of modules imported from");
STATISTIC(NumDeadSymbols, "Number of dead stripped symbols in index");
STATISTIC(NumLiveSymbols, "Number of live symbols in index");
/// Limit on instruction count of imported functions.
static cl::opt<unsigned> ImportInstrLimit(
"import-instr-limit", cl::init(100), cl::Hidden, cl::value_desc("N"),
cl::desc("Only import functions with less than N instructions"));
static cl::opt<int> ImportCutoff(
"import-cutoff", cl::init(-1), cl::Hidden, cl::value_desc("N"),
cl::desc("Only import first N functions if N>=0 (default -1)"));
static cl::opt<float>
ImportInstrFactor("import-instr-evolution-factor", cl::init(0.7),
cl::Hidden, cl::value_desc("x"),
cl::desc("As we import functions, multiply the "
"`import-instr-limit` threshold by this factor "
"before processing newly imported functions"));
static cl::opt<float> ImportHotInstrFactor(
"import-hot-evolution-factor", cl::init(1.0), cl::Hidden,
cl::value_desc("x"),
cl::desc("As we import functions called from hot callsite, multiply the "
"`import-instr-limit` threshold by this factor "
"before processing newly imported functions"));
static cl::opt<float> ImportHotMultiplier(
"import-hot-multiplier", cl::init(10.0), cl::Hidden, cl::value_desc("x"),
cl::desc("Multiply the `import-instr-limit` threshold for hot callsites"));
static cl::opt<float> ImportCriticalMultiplier(
"import-critical-multiplier", cl::init(100.0), cl::Hidden,
cl::value_desc("x"),
cl::desc(
"Multiply the `import-instr-limit` threshold for critical callsites"));
// FIXME: This multiplier has not really been tuned.
static cl::opt<float> ImportColdMultiplier(
"import-cold-multiplier", cl::init(0), cl::Hidden, cl::value_desc("N"),
cl::desc("Multiply the `import-instr-limit` threshold for cold callsites"));
static cl::opt<bool> PrintImports("print-imports", cl::init(false), cl::Hidden,
cl::desc("Print imported functions"));
static cl::opt<bool> ComputeDead("compute-dead", cl::init(true), cl::Hidden,
cl::desc("Compute dead symbols"));
static cl::opt<bool> EnableImportMetadata(
"enable-import-metadata", cl::init(
#if !defined(NDEBUG)
true /*Enabled with asserts.*/
#else
false
#endif
),
cl::Hidden, cl::desc("Enable import metadata like 'thinlto_src_module'"));
/// Summary file to use for function importing when using -function-import from
/// the command line.
static cl::opt<std::string>
SummaryFile("summary-file",
cl::desc("The summary file to use for function importing."));
/// Used when testing importing from distributed indexes via opt
/// -function-import.
static cl::opt<bool>
ImportAllIndex("import-all-index",
cl::desc("Import all external functions in index."));
// Lazily load a module from \p FileName in \p Context.
static std::unique_ptr<Module> loadFile(const std::string &FileName,
LLVMContext &Context) {
SMDiagnostic Err;
LLVM_DEBUG(dbgs() << "Loading '" << FileName << "'\n");
// Metadata isn't loaded until functions are imported, to minimize
// the memory overhead.
std::unique_ptr<Module> Result =
getLazyIRFileModule(FileName, Err, Context,
/* ShouldLazyLoadMetadata = */ true);
if (!Result) {
Err.print("function-import", errs());
report_fatal_error("Abort");
}
return Result;
}
/// Given a list of possible callee implementations for a call site, select one
/// that fits the \p Threshold.
///
/// FIXME: select "best" instead of first that fits. But what is "best"?
/// - The smallest: more likely to be inlined.
/// - The one with the least outgoing edges (already well optimized).
/// - One from a module already being imported from in order to reduce the
/// number of source modules parsed/linked.
/// - One that has PGO data attached.
/// - [insert your fancy metric here]
static const GlobalValueSummary *
selectCallee(const ModuleSummaryIndex &Index,
ArrayRef<std::unique_ptr<GlobalValueSummary>> CalleeSummaryList,
unsigned Threshold, StringRef CallerModulePath) {
auto It = llvm::find_if(
CalleeSummaryList,
[&](const std::unique_ptr<GlobalValueSummary> &SummaryPtr) {
auto *GVSummary = SummaryPtr.get();
if (!Index.isGlobalValueLive(GVSummary))
return false;
// For SamplePGO, in computeImportForFunction the OriginalId
// may have been used to locate the callee summary list (see
// the comment there).
// The mapping from OriginalId to GUID may return a GUID
// that corresponds to a static variable. Filter it out here.
// This can happen when
// 1) There is a call to a library function which is not defined
// in the index.
// 2) There is a static variable with the OriginalGUID identical
// to the GUID of the library function in 1).
// When this happens, the logic for SamplePGO kicks in and
// the static variable in 2) will be found, which needs to be
// filtered out.
if (GVSummary->getSummaryKind() == GlobalValueSummary::GlobalVarKind)
return false;
if (GlobalValue::isInterposableLinkage(GVSummary->linkage()))
// There is no point in importing these; we can't inline them
return false;
auto *Summary = cast<FunctionSummary>(GVSummary->getBaseObject());
// If this is a local function, make sure we import the copy
// in the caller's module. The only time a local function can
// share an entry in the index is if there is a local with the same name
// in another module that had the same source file name (in a different
// directory), where each was compiled in its own directory so there
// was no distinguishing path.
// However, do the import from another module if there is only one
// entry in the list - in that case this must be a reference due
// to indirect call profile data, since a function pointer can point to
// a local in another module.
if (GlobalValue::isLocalLinkage(Summary->linkage()) &&
CalleeSummaryList.size() > 1 &&
Summary->modulePath() != CallerModulePath)
return false;
if (Summary->instCount() > Threshold)
return false;
if (Summary->notEligibleToImport())
return false;
return true;
});
if (It == CalleeSummaryList.end())
return nullptr;
return cast<GlobalValueSummary>(It->get());
}
namespace {
using EdgeInfo = std::tuple<const FunctionSummary *, unsigned /* Threshold */,
GlobalValue::GUID>;
} // anonymous namespace
static ValueInfo
updateValueInfoForIndirectCalls(const ModuleSummaryIndex &Index, ValueInfo VI) {
if (!VI.getSummaryList().empty())
return VI;
// For SamplePGO, the indirect call targets for local functions will
// have their original names annotated in the profile. We try to find the
// corresponding PGOFuncName as the GUID.
// FIXME: Consider updating the edges in the graph after building
// it, rather than needing to perform this mapping on each walk.
auto GUID = Index.getGUIDFromOriginalID(VI.getGUID());
if (GUID == 0)
return ValueInfo();
return Index.getValueInfo(GUID);
}
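// A hypothetical worked example of the fallback above (names and the
// delimiter are illustrative, not taken from a real index): for a local
// callee "foo" defined in file.cpp, the profile annotates the original name,
// so the direct lookup finds nothing and we remap through the original ID:
//   Index.getValueInfo(GUID("foo"))          -> empty summary list
//   Index.getGUIDFromOriginalID(GUID("foo")) -> GUID("file.cpp:foo")
//   Index.getValueInfo(GUID("file.cpp:foo")) -> the ValueInfo actually used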
static void computeImportForReferencedGlobals(
const FunctionSummary &Summary, const GVSummaryMapTy &DefinedGVSummaries,
FunctionImporter::ImportMapTy &ImportList,
StringMap<FunctionImporter::ExportSetTy> *ExportLists) {
for (auto &VI : Summary.refs()) {
if (DefinedGVSummaries.count(VI.getGUID())) {
LLVM_DEBUG(
dbgs() << "Ref ignored! Target already in destination module.\n");
continue;
}
LLVM_DEBUG(dbgs() << " ref -> " << VI << "\n");
for (auto &RefSummary : VI.getSummaryList())
if (RefSummary->getSummaryKind() == GlobalValueSummary::GlobalVarKind &&
- // Don't try to import regular LTO summaries added to dummy module.
- !RefSummary->modulePath().empty() &&
+ !RefSummary->notEligibleToImport() &&
!GlobalValue::isInterposableLinkage(RefSummary->linkage()) &&
RefSummary->refs().empty()) {
ImportList[RefSummary->modulePath()].insert(VI.getGUID());
if (ExportLists)
(*ExportLists)[RefSummary->modulePath()].insert(VI.getGUID());
break;
}
}
}
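// A hypothetical example of the filter above (not from the original source):
// a read-only table such as
//   static const int Table[4] = {1, 2, 3, 4};
// is a GlobalVarKind summary with an empty refs() list and non-interposable
// linkage, so it can be pulled in alongside an importing function. A global
// whose initializer references other globals fails the refs().empty() test,
// since copying it could drag in an unbounded chain of references.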
/// Compute the list of functions to import for a given caller. Mark these
/// imported functions and the symbols they reference in their source module as
/// exported from their source module.
static void computeImportForFunction(
const FunctionSummary &Summary, const ModuleSummaryIndex &Index,
const unsigned Threshold, const GVSummaryMapTy &DefinedGVSummaries,
SmallVectorImpl<EdgeInfo> &Worklist,
FunctionImporter::ImportMapTy &ImportList,
StringMap<FunctionImporter::ExportSetTy> *ExportLists,
FunctionImporter::ImportThresholdsTy &ImportThresholds) {
computeImportForReferencedGlobals(Summary, DefinedGVSummaries, ImportList,
ExportLists);
static int ImportCount = 0;
for (auto &Edge : Summary.calls()) {
ValueInfo VI = Edge.first;
LLVM_DEBUG(dbgs() << " edge -> " << VI << " Threshold:" << Threshold
<< "\n");
if (ImportCutoff >= 0 && ImportCount >= ImportCutoff) {
LLVM_DEBUG(dbgs() << "ignored! import-cutoff value of " << ImportCutoff
<< " reached.\n");
continue;
}
VI = updateValueInfoForIndirectCalls(Index, VI);
if (!VI)
continue;
if (DefinedGVSummaries.count(VI.getGUID())) {
LLVM_DEBUG(dbgs() << "ignored! Target already in destination module.\n");
continue;
}
auto GetBonusMultiplier = [](CalleeInfo::HotnessType Hotness) -> float {
if (Hotness == CalleeInfo::HotnessType::Hot)
return ImportHotMultiplier;
if (Hotness == CalleeInfo::HotnessType::Cold)
return ImportColdMultiplier;
if (Hotness == CalleeInfo::HotnessType::Critical)
return ImportCriticalMultiplier;
return 1.0;
};
const auto NewThreshold =
Threshold * GetBonusMultiplier(Edge.second.getHotness());
auto IT = ImportThresholds.insert(
std::make_pair(VI.getGUID(), std::make_pair(NewThreshold, nullptr)));
bool PreviouslyVisited = !IT.second;
auto &ProcessedThreshold = IT.first->second.first;
auto &CalleeSummary = IT.first->second.second;
const FunctionSummary *ResolvedCalleeSummary = nullptr;
if (CalleeSummary) {
assert(PreviouslyVisited);
// Since the traversal of the call graph is DFS, we can revisit a function
// a second time with a higher threshold. In this case, it is added back
// to the worklist with the new threshold (so that its own callee chains
// can be considered with the higher threshold).
if (NewThreshold <= ProcessedThreshold) {
LLVM_DEBUG(
dbgs() << "ignored! Target was already imported with Threshold "
<< ProcessedThreshold << "\n");
continue;
}
// Update with new larger threshold.
ProcessedThreshold = NewThreshold;
ResolvedCalleeSummary = cast<FunctionSummary>(CalleeSummary);
} else {
// If we already rejected importing a callee at the same or higher
// threshold, don't waste time calling selectCallee.
if (PreviouslyVisited && NewThreshold <= ProcessedThreshold) {
LLVM_DEBUG(
dbgs() << "ignored! Target was already rejected with Threshold "
<< ProcessedThreshold << "\n");
continue;
}
CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold,
Summary.modulePath());
if (!CalleeSummary) {
// Update with new larger threshold if this was a retry (otherwise
// we would have already inserted with NewThreshold above).
if (PreviouslyVisited)
ProcessedThreshold = NewThreshold;
LLVM_DEBUG(
dbgs() << "ignored! No qualifying callee with summary found.\n");
continue;
}
// "Resolve" the summary
CalleeSummary = CalleeSummary->getBaseObject();
ResolvedCalleeSummary = cast<FunctionSummary>(CalleeSummary);
assert(ResolvedCalleeSummary->instCount() <= NewThreshold &&
"selectCallee() didn't honor the threshold");
auto ExportModulePath = ResolvedCalleeSummary->modulePath();
auto ILI = ImportList[ExportModulePath].insert(VI.getGUID());
// If this GUID was already in the set of imports from the exporting
// module, then we previously decided to import its definition.
bool PreviouslyImported = !ILI.second;
// Make exports in the source module.
if (ExportLists) {
auto &ExportList = (*ExportLists)[ExportModulePath];
ExportList.insert(VI.getGUID());
if (!PreviouslyImported) {
// This is the first time this function was exported from its source
// module, so mark all functions and globals it references as exported
// to the outside if they are defined in the same source module.
// For efficiency, we unconditionally add all the referenced GUIDs
// to the ExportList for this module, and will prune out any not
// defined in the module later in a single pass.
for (auto &Edge : ResolvedCalleeSummary->calls()) {
auto CalleeGUID = Edge.first.getGUID();
ExportList.insert(CalleeGUID);
}
for (auto &Ref : ResolvedCalleeSummary->refs()) {
auto GUID = Ref.getGUID();
ExportList.insert(GUID);
}
}
}
}
auto GetAdjustedThreshold = [](unsigned Threshold, bool IsHotCallsite) {
// Adjust the threshold for next level of imported functions.
// The threshold is different for hot callsites because we can then
// inline chains of hot calls.
if (IsHotCallsite)
return Threshold * ImportHotInstrFactor;
return Threshold * ImportInstrFactor;
};
bool IsHotCallsite =
Edge.second.getHotness() == CalleeInfo::HotnessType::Hot;
const auto AdjThreshold = GetAdjustedThreshold(Threshold, IsHotCallsite);
ImportCount++;
// Insert the newly imported function to the worklist.
Worklist.emplace_back(ResolvedCalleeSummary, AdjThreshold, VI.getGUID());
}
}
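// A worked example of the threshold arithmetic above, assuming the default
// flag values (illustrative only): starting from import-instr-limit = 100,
//   hot edge:      NewThreshold = 100 * 10.0  = 1000  (ImportHotMultiplier)
//   critical edge: NewThreshold = 100 * 100.0 = 10000 (ImportCriticalMultiplier)
//   cold edge:     NewThreshold = 100 * 0     = 0     (effectively never imported)
// For the next level of the traversal the threshold then decays by
// ImportInstrFactor (0.7), e.g. 100 -> 70 -> 49, unless the callsite is hot,
// where ImportHotInstrFactor (1.0) leaves it unchanged.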
/// Given the list of globals defined in a module, compute the list of imports
/// as well as the list of "exports", i.e. the list of symbols referenced from
/// another module (that may require promotion).
static void ComputeImportForModule(
const GVSummaryMapTy &DefinedGVSummaries, const ModuleSummaryIndex &Index,
FunctionImporter::ImportMapTy &ImportList,
StringMap<FunctionImporter::ExportSetTy> *ExportLists = nullptr) {
// The worklist contains the functions imported into this module; we will
// analyze their callees and may import further down the call graph.
SmallVector<EdgeInfo, 128> Worklist;
FunctionImporter::ImportThresholdsTy ImportThresholds;
// Populate the worklist with the imports for the functions in the current
// module.
for (auto &GVSummary : DefinedGVSummaries) {
#ifndef NDEBUG
// FIXME: Change the GVSummaryMapTy to hold ValueInfo instead of GUID
// so this map look up (and possibly others) can be avoided.
auto VI = Index.getValueInfo(GVSummary.first);
#endif
if (!Index.isGlobalValueLive(GVSummary.second)) {
LLVM_DEBUG(dbgs() << "Ignores Dead GUID: " << VI << "\n");
continue;
}
auto *FuncSummary =
dyn_cast<FunctionSummary>(GVSummary.second->getBaseObject());
if (!FuncSummary)
// Skip import for global variables
continue;
LLVM_DEBUG(dbgs() << "Initialize import for " << VI << "\n");
computeImportForFunction(*FuncSummary, Index, ImportInstrLimit,
DefinedGVSummaries, Worklist, ImportList,
ExportLists, ImportThresholds);
}
// Process the newly imported functions and add callees to the worklist.
while (!Worklist.empty()) {
auto FuncInfo = Worklist.pop_back_val();
auto *Summary = std::get<0>(FuncInfo);
auto Threshold = std::get<1>(FuncInfo);
computeImportForFunction(*Summary, Index, Threshold, DefinedGVSummaries,
Worklist, ImportList, ExportLists,
ImportThresholds);
}
}
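// A minimal walkthrough of the worklist loop above, on a hypothetical call
// graph with A defined in this module and A -> B -> C in other modules
// (default thresholds assumed):
//   1. computeImportForFunction(A, 100) imports B and pushes (B, 70).
//   2. Popping (B, 70) imports C if C's instCount() fits the (possibly
//      multiplied) threshold, pushing (C, 49).
//   3. Popping (C, 49) finds no further qualifying edges; the worklist
//      drains and the import list for this module is complete.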
#ifndef NDEBUG
static bool isGlobalVarSummary(const ModuleSummaryIndex &Index,
GlobalValue::GUID G) {
if (const auto &VI = Index.getValueInfo(G)) {
auto SL = VI.getSummaryList();
if (!SL.empty())
return SL[0]->getSummaryKind() == GlobalValueSummary::GlobalVarKind;
}
return false;
}
static GlobalValue::GUID getGUID(GlobalValue::GUID G) { return G; }
template <class T>
static unsigned numGlobalVarSummaries(const ModuleSummaryIndex &Index,
T &Cont) {
unsigned NumGVS = 0;
for (auto &V : Cont)
if (isGlobalVarSummary(Index, getGUID(V)))
++NumGVS;
return NumGVS;
}
#endif
/// Compute all the import and export for every module using the Index.
void llvm::ComputeCrossModuleImport(
const ModuleSummaryIndex &Index,
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
StringMap<FunctionImporter::ImportMapTy> &ImportLists,
StringMap<FunctionImporter::ExportSetTy> &ExportLists) {
// For each module that has functions defined, compute the import/export lists.
for (auto &DefinedGVSummaries : ModuleToDefinedGVSummaries) {
auto &ImportList = ImportLists[DefinedGVSummaries.first()];
LLVM_DEBUG(dbgs() << "Computing import for Module '"
<< DefinedGVSummaries.first() << "'\n");
ComputeImportForModule(DefinedGVSummaries.second, Index, ImportList,
&ExportLists);
}
// When computing imports we added all GUIDs referenced by anything
// imported from the module to its ExportList. Now we prune each ExportList
// of any not defined in that module. This is more efficient than checking
// while computing imports because some of the summary lists may be long
// due to linkonce (comdat) copies.
for (auto &ELI : ExportLists) {
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ELI.first());
for (auto EI = ELI.second.begin(); EI != ELI.second.end();) {
if (!DefinedGVSummaries.count(*EI))
EI = ELI.second.erase(EI);
else
++EI;
}
}
#ifndef NDEBUG
LLVM_DEBUG(dbgs() << "Import/Export lists for " << ImportLists.size()
<< " modules:\n");
for (auto &ModuleImports : ImportLists) {
auto ModName = ModuleImports.first();
auto &Exports = ExportLists[ModName];
unsigned NumGVS = numGlobalVarSummaries(Index, Exports);
LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports "
<< Exports.size() - NumGVS << " functions and " << NumGVS
<< " vars. Imports from " << ModuleImports.second.size()
<< " modules.\n");
for (auto &Src : ModuleImports.second) {
auto SrcModName = Src.first();
unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
<< " functions imported from " << SrcModName << "\n");
LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod
<< " global vars imported from " << SrcModName << "\n");
}
}
#endif
}
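// A hypothetical example of the pruning above: if importing f from module M1
// added {f, g, h} to M1's ExportList, but h is a linkonce_odr copy whose
// summary actually lives in module M2, then h is not in M1's
// DefinedGVSummaries and is erased here. Only symbols genuinely defined in
// M1 can require promotion or export on M1's behalf.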
#ifndef NDEBUG
static void dumpImportListForModule(const ModuleSummaryIndex &Index,
StringRef ModulePath,
FunctionImporter::ImportMapTy &ImportList) {
LLVM_DEBUG(dbgs() << "* Module " << ModulePath << " imports from "
<< ImportList.size() << " modules.\n");
for (auto &Src : ImportList) {
auto SrcModName = Src.first();
unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
<< " functions imported from " << SrcModName << "\n");
LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from "
<< SrcModName << "\n");
}
}
#endif
/// Compute all the imports for the given module in the Index.
void llvm::ComputeCrossModuleImportForModule(
StringRef ModulePath, const ModuleSummaryIndex &Index,
FunctionImporter::ImportMapTy &ImportList) {
// Collect the list of functions this module defines.
// GUID -> Summary
GVSummaryMapTy FunctionSummaryMap;
Index.collectDefinedFunctionsForModule(ModulePath, FunctionSummaryMap);
// Compute the import list for this module.
LLVM_DEBUG(dbgs() << "Computing import for Module '" << ModulePath << "'\n");
ComputeImportForModule(FunctionSummaryMap, Index, ImportList);
#ifndef NDEBUG
dumpImportListForModule(Index, ModulePath, ImportList);
#endif
}
// Mark all external summaries in Index for import into the given module.
// Used for distributed builds using a distributed index.
void llvm::ComputeCrossModuleImportForModuleFromIndex(
StringRef ModulePath, const ModuleSummaryIndex &Index,
FunctionImporter::ImportMapTy &ImportList) {
for (auto &GlobalList : Index) {
// Ignore entries for undefined references.
if (GlobalList.second.SummaryList.empty())
continue;
auto GUID = GlobalList.first;
assert(GlobalList.second.SummaryList.size() == 1 &&
"Expected individual combined index to have one summary per GUID");
auto &Summary = GlobalList.second.SummaryList[0];
// Skip the summaries for the importing module. These are included to
// e.g. record required linkage changes.
if (Summary->modulePath() == ModulePath)
continue;
// Add an entry to provoke importing by thinBackend.
ImportList[Summary->modulePath()].insert(GUID);
}
#ifndef NDEBUG
dumpImportListForModule(Index, ModulePath, ImportList);
#endif
}
void llvm::computeDeadSymbols(
ModuleSummaryIndex &Index,
const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
function_ref<PrevailingType(GlobalValue::GUID)> isPrevailing) {
assert(!Index.withGlobalValueDeadStripping());
if (!ComputeDead)
return;
if (GUIDPreservedSymbols.empty())
// Don't do anything when nothing is live; this keeps tests simple.
return;
unsigned LiveSymbols = 0;
SmallVector<ValueInfo, 128> Worklist;
Worklist.reserve(GUIDPreservedSymbols.size() * 2);
for (auto GUID : GUIDPreservedSymbols) {
ValueInfo VI = Index.getValueInfo(GUID);
if (!VI)
continue;
for (auto &S : VI.getSummaryList())
S->setLive(true);
}
// Add values flagged in the index as live roots to the worklist.
for (const auto &Entry : Index) {
auto VI = Index.getValueInfo(Entry);
for (auto &S : Entry.second.SummaryList)
if (S->isLive()) {
LLVM_DEBUG(dbgs() << "Live root: " << VI << "\n");
Worklist.push_back(VI);
++LiveSymbols;
break;
}
}
// Make value live and add it to the worklist if it was not live before.
auto visit = [&](ValueInfo VI) {
// FIXME: If we knew which edges were created for indirect call profiles,
// we could skip them here. Any that are live should be reached via
// other edges, e.g. reference edges. Otherwise, using a profile collected
// on a slightly different binary might provoke preserving, importing
// and ultimately promoting calls to functions not linked into this
// binary, which increases the binary size unnecessarily. Note that
// if this code changes, the importer needs to change so that edges
// to functions marked dead are skipped.
VI = updateValueInfoForIndirectCalls(Index, VI);
if (!VI)
return;
for (auto &S : VI.getSummaryList())
if (S->isLive())
return;
// Symbols known to be non-prevailing are kept live only if some copy is
// available_externally. Those symbols are discarded later in the
// EliminateAvailableExternally pass, and setting them to not-live would break
// downstream users of liveness information (PR36483).
if (isPrevailing(VI.getGUID()) == PrevailingType::No) {
bool AvailableExternally = false;
bool Interposable = false;
for (auto &S : VI.getSummaryList()) {
if (S->linkage() == GlobalValue::AvailableExternallyLinkage)
AvailableExternally = true;
else if (GlobalValue::isInterposableLinkage(S->linkage()))
Interposable = true;
}
if (!AvailableExternally)
return;
if (Interposable)
report_fatal_error("Interposable and available_externally symbol");
}
for (auto &S : VI.getSummaryList())
S->setLive(true);
++LiveSymbols;
Worklist.push_back(VI);
};
while (!Worklist.empty()) {
auto VI = Worklist.pop_back_val();
for (auto &Summary : VI.getSummaryList()) {
GlobalValueSummary *Base = Summary->getBaseObject();
// Set base value live in case it is an alias.
Base->setLive(true);
for (auto Ref : Base->refs())
visit(Ref);
if (auto *FS = dyn_cast<FunctionSummary>(Base))
for (auto Call : FS->calls())
visit(Call.first);
}
}
Index.setWithGlobalValueDeadStripping();
unsigned DeadSymbols = Index.size() - LiveSymbols;
LLVM_DEBUG(dbgs() << LiveSymbols << " symbols Live, and " << DeadSymbols
<< " symbols Dead \n");
NumDeadSymbols += DeadSymbols;
NumLiveSymbols += LiveSymbols;
}
/// Compute the set of summaries needed for a ThinLTO backend compilation of
/// \p ModulePath.
void llvm::gatherImportedSummariesForModule(
StringRef ModulePath,
const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
const FunctionImporter::ImportMapTy &ImportList,
std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex) {
// Include all summaries from the importing module.
ModuleToSummariesForIndex[ModulePath] =
ModuleToDefinedGVSummaries.lookup(ModulePath);
// Include summaries for imports.
for (auto &ILI : ImportList) {
auto &SummariesForIndex = ModuleToSummariesForIndex[ILI.first()];
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ILI.first());
for (auto &GI : ILI.second) {
const auto &DS = DefinedGVSummaries.find(GI);
assert(DS != DefinedGVSummaries.end() &&
"Expected a defined summary for imported global value");
SummariesForIndex[GI] = DS->second;
}
}
}
/// Emit the files \p ModulePath will import from into \p OutputFilename.
std::error_code llvm::EmitImportsFiles(
StringRef ModulePath, StringRef OutputFilename,
const std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex) {
std::error_code EC;
raw_fd_ostream ImportsOS(OutputFilename, EC, sys::fs::OpenFlags::F_None);
if (EC)
return EC;
for (auto &ILI : ModuleToSummariesForIndex)
// The ModuleToSummariesForIndex map includes an entry for the current
// Module (needed for writing out the index files). We don't want to
// include it in the imports file, however, so filter it out.
if (ILI.first != ModulePath)
ImportsOS << ILI.first << "\n";
return std::error_code();
}
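// The emitted imports file is a plain list of module paths, one per line,
// with the importing module itself filtered out. For example (hypothetical
// paths), the imports file for a.o might contain:
//   b.o
//   c.o
// telling the distributed backend that a.o's compile imports from b.o and c.o.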
bool llvm::convertToDeclaration(GlobalValue &GV) {
LLVM_DEBUG(dbgs() << "Converting to a declaration: `" << GV.getName()
<< "\n");
if (Function *F = dyn_cast<Function>(&GV)) {
F->deleteBody();
F->clearMetadata();
F->setComdat(nullptr);
} else if (GlobalVariable *V = dyn_cast<GlobalVariable>(&GV)) {
V->setInitializer(nullptr);
V->setLinkage(GlobalValue::ExternalLinkage);
V->clearMetadata();
V->setComdat(nullptr);
} else {
GlobalValue *NewGV;
if (GV.getValueType()->isFunctionTy())
NewGV =
Function::Create(cast<FunctionType>(GV.getValueType()),
GlobalValue::ExternalLinkage, "", GV.getParent());
else
NewGV =
new GlobalVariable(*GV.getParent(), GV.getValueType(),
/*isConstant*/ false, GlobalValue::ExternalLinkage,
/*init*/ nullptr, "",
/*insertbefore*/ nullptr, GV.getThreadLocalMode(),
GV.getType()->getAddressSpace());
NewGV->takeName(&GV);
GV.replaceAllUsesWith(NewGV);
return false;
}
return true;
}
/// Fixup WeakForLinker linkages in \p TheModule based on summary analysis.
void llvm::thinLTOResolveWeakForLinkerModule(
Module &TheModule, const GVSummaryMapTy &DefinedGlobals) {
auto updateLinkage = [&](GlobalValue &GV) {
// See if the global summary analysis computed a new resolved linkage.
const auto &GS = DefinedGlobals.find(GV.getGUID());
if (GS == DefinedGlobals.end())
return;
auto NewLinkage = GS->second->linkage();
if (NewLinkage == GV.getLinkage())
return;
// Switch the linkage to weakany if asked for, e.g. we do this for
// linker redefined symbols (via --wrap or --defsym).
// We record that the visibility should be changed here in `addThinLTO`
// as we need access to the resolution vectors for each input file in
// order to find which symbols have been redefined.
// We may consider reorganizing this code and moving the linkage recording
// somewhere else, e.g. in thinLTOResolveWeakForLinkerInIndex.
if (NewLinkage == GlobalValue::WeakAnyLinkage) {
GV.setLinkage(NewLinkage);
return;
}
if (!GlobalValue::isWeakForLinker(GV.getLinkage()))
return;
// Check for a non-prevailing def that has interposable linkage
// (e.g. non-odr weak or linkonce). In that case we can't simply
// convert to available_externally, since it would lose the
// interposable property and possibly get inlined. Simply drop
// the definition in that case.
if (GlobalValue::isAvailableExternallyLinkage(NewLinkage) &&
GlobalValue::isInterposableLinkage(GV.getLinkage())) {
if (!convertToDeclaration(GV))
// FIXME: Change this to collect replaced GVs and later erase
// them from the parent module once thinLTOResolveWeakForLinkerGUID is
// changed to enable this for aliases.
llvm_unreachable("Expected GV to be converted");
} else {
// If the original symbol has global unnamed addr and linkonce_odr linkage,
// it should be an auto-hide symbol. Add hidden visibility to the symbol to
// preserve the property.
if (GV.hasLinkOnceODRLinkage() && GV.hasGlobalUnnamedAddr() &&
NewLinkage == GlobalValue::WeakODRLinkage)
GV.setVisibility(GlobalValue::HiddenVisibility);
LLVM_DEBUG(dbgs() << "ODR fixing up linkage for `" << GV.getName()
<< "` from " << GV.getLinkage() << " to " << NewLinkage
<< "\n");
GV.setLinkage(NewLinkage);
}
// Remove declarations from comdats, including available_externally
// as this is a declaration for the linker, and will be dropped eventually.
// It is illegal for comdats to contain declarations.
auto *GO = dyn_cast_or_null<GlobalObject>(&GV);
if (GO && GO->isDeclarationForLinker() && GO->hasComdat())
GO->setComdat(nullptr);
};
// Process functions, globals, and aliases now.
for (auto &GV : TheModule)
updateLinkage(GV);
for (auto &GV : TheModule.globals())
updateLinkage(GV);
for (auto &GV : TheModule.aliases())
updateLinkage(GV);
}
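// Illustrative resolutions applied by the callback above (a sketch, not an
// exhaustive list):
//   prevailing weak_odr def            -> linkage kept as computed
//   non-prevailing weak_odr def        -> available_externally (inlinable)
//   non-prevailing interposable weak   -> dropped to a declaration, since
//                                         available_externally would lose the
//                                         interposable property
//   linkonce_odr + global unnamed_addr -> hidden visibility when kept as
//                                         weak_odr (auto-hide)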
/// Run internalization on \p TheModule based on summary analysis.
void llvm::thinLTOInternalizeModule(Module &TheModule,
const GVSummaryMapTy &DefinedGlobals) {
// Declare a callback for the internalize pass that decides, for every
// candidate GlobalValue, whether it can be internalized or not.
auto MustPreserveGV = [&](const GlobalValue &GV) -> bool {
// Lookup the linkage recorded in the summaries during global analysis.
auto GS = DefinedGlobals.find(GV.getGUID());
if (GS == DefinedGlobals.end()) {
// Must have been promoted (possibly conservatively). Find original
// name so that we can access the correct summary and see if it can
// be internalized again.
// FIXME: Eventually we should control promotion instead of promoting
// and internalizing again.
StringRef OrigName =
ModuleSummaryIndex::getOriginalNameBeforePromote(GV.getName());
std::string OrigId = GlobalValue::getGlobalIdentifier(
OrigName, GlobalValue::InternalLinkage,
TheModule.getSourceFileName());
GS = DefinedGlobals.find(GlobalValue::getGUID(OrigId));
if (GS == DefinedGlobals.end()) {
// Also check the original non-promoted non-globalized name. In some
// cases a preempted weak value is linked in as a local copy because
// it is referenced by an alias (IRLinker::linkGlobalValueProto).
// In that case, since it was originally not a local value, it was
// recorded in the index using the original name.
// FIXME: This may not be needed once PR27866 is fixed.
GS = DefinedGlobals.find(GlobalValue::getGUID(OrigName));
assert(GS != DefinedGlobals.end());
}
}
return !GlobalValue::isLocalLinkage(GS->second->linkage());
};
// FIXME: See if we can just internalize directly here via linkage changes
// based on the index, rather than invoking internalizeModule.
internalizeModule(TheModule, MustPreserveGV);
}
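// A naming example for the promoted-value lookup above (hash is
// hypothetical): a local "foo" promoted during ThinLTO becomes
// "foo.llvm.123456". getOriginalNameBeforePromote() strips the
// ".llvm.<suffix>" to recover "foo", the original local identifier is
// rebuilt qualified by the source file name, and that GUID is looked up in
// DefinedGlobals to decide whether "foo" may be internalized again.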
/// Make alias a clone of its aliasee.
static Function *replaceAliasWithAliasee(Module *SrcModule, GlobalAlias *GA) {
Function *Fn = cast<Function>(GA->getBaseObject());
ValueToValueMapTy VMap;
Function *NewFn = CloneFunction(Fn, VMap);
// The clone should use the original alias's linkage and name, and we ensure
// that all uses of the alias instead use the new clone (cast if necessary).
NewFn->setLinkage(GA->getLinkage());
GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewFn, GA->getType()));
NewFn->takeName(GA);
return NewFn;
}
// Automatically import functions in Module \p DestModule based on the summary
// index.
Expected<bool> FunctionImporter::importFunctions(
Module &DestModule, const FunctionImporter::ImportMapTy &ImportList) {
LLVM_DEBUG(dbgs() << "Starting import for Module "
<< DestModule.getModuleIdentifier() << "\n");
unsigned ImportedCount = 0, ImportedGVCount = 0;
IRMover Mover(DestModule);
// Do the actual import of functions now, one Module at a time
std::set<StringRef> ModuleNameOrderedList;
for (auto &FunctionsToImportPerModule : ImportList) {
ModuleNameOrderedList.insert(FunctionsToImportPerModule.first());
}
for (auto &Name : ModuleNameOrderedList) {
// Get the module for the import
const auto &FunctionsToImportPerModule = ImportList.find(Name);
assert(FunctionsToImportPerModule != ImportList.end());
Expected<std::unique_ptr<Module>> SrcModuleOrErr = ModuleLoader(Name);
if (!SrcModuleOrErr)
return SrcModuleOrErr.takeError();
std::unique_ptr<Module> SrcModule = std::move(*SrcModuleOrErr);
assert(&DestModule.getContext() == &SrcModule->getContext() &&
"Context mismatch");
// If modules were created with lazy metadata loading, materialize it
// now, before linking it (otherwise this will be a noop).
if (Error Err = SrcModule->materializeMetadata())
return std::move(Err);
auto &ImportGUIDs = FunctionsToImportPerModule->second;
// Find the globals to import
SetVector<GlobalValue *> GlobalsToImport;
for (Function &F : *SrcModule) {
if (!F.hasName())
continue;
auto GUID = F.getGUID();
auto Import = ImportGUIDs.count(GUID);
LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function "
<< GUID << " " << F.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
if (Import) {
if (Error Err = F.materialize())
return std::move(Err);
if (EnableImportMetadata) {
// Add 'thinlto_src_module' metadata for statistics and debugging.
F.setMetadata(
"thinlto_src_module",
MDNode::get(DestModule.getContext(),
{MDString::get(DestModule.getContext(),
SrcModule->getSourceFileName())}));
}
GlobalsToImport.insert(&F);
}
}
for (GlobalVariable &GV : SrcModule->globals()) {
if (!GV.hasName())
continue;
auto GUID = GV.getGUID();
auto Import = ImportGUIDs.count(GUID);
LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global "
<< GUID << " " << GV.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
if (Import) {
if (Error Err = GV.materialize())
return std::move(Err);
ImportedGVCount += GlobalsToImport.insert(&GV);
}
}
for (GlobalAlias &GA : SrcModule->aliases()) {
if (!GA.hasName())
continue;
auto GUID = GA.getGUID();
auto Import = ImportGUIDs.count(GUID);
LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias "
<< GUID << " " << GA.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
if (Import) {
if (Error Err = GA.materialize())
return std::move(Err);
// Import alias as a copy of its aliasee.
GlobalObject *Base = GA.getBaseObject();
if (Error Err = Base->materialize())
return std::move(Err);
auto *Fn = replaceAliasWithAliasee(SrcModule.get(), &GA);
LLVM_DEBUG(dbgs() << "Is importing aliasee fn " << Base->getGUID()
<< " " << Base->getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
if (EnableImportMetadata) {
// Add 'thinlto_src_module' metadata for statistics and debugging.
Fn->setMetadata(
"thinlto_src_module",
MDNode::get(DestModule.getContext(),
{MDString::get(DestModule.getContext(),
SrcModule->getSourceFileName())}));
}
GlobalsToImport.insert(Fn);
}
}
// Upgrade debug info after we're done materializing all the globals and we
// have loaded all the required metadata!
UpgradeDebugInfo(*SrcModule);
// Link in the specified functions.
if (renameModuleForThinLTO(*SrcModule, Index, &GlobalsToImport))
return true;
if (PrintImports) {
for (const auto *GV : GlobalsToImport)
dbgs() << DestModule.getSourceFileName() << ": Import " << GV->getName()
<< " from " << SrcModule->getSourceFileName() << "\n";
}
if (Mover.move(std::move(SrcModule), GlobalsToImport.getArrayRef(),
[](GlobalValue &, IRMover::ValueAdder) {},
/*IsPerformingImport=*/true))
report_fatal_error("Function Import: link error");
ImportedCount += GlobalsToImport.size();
NumImportedModules++;
}
NumImportedFunctions += (ImportedCount - ImportedGVCount);
NumImportedGlobalVars += ImportedGVCount;
LLVM_DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount
<< " functions for Module "
<< DestModule.getModuleIdentifier() << "\n");
LLVM_DEBUG(dbgs() << "Imported " << ImportedGVCount
<< " global variables for Module "
<< DestModule.getModuleIdentifier() << "\n");
return ImportedCount;
}
static bool doImportingForModule(Module &M) {
if (SummaryFile.empty())
report_fatal_error("error: -function-import requires -summary-file\n");
Expected<std::unique_ptr<ModuleSummaryIndex>> IndexPtrOrErr =
getModuleSummaryIndexForFile(SummaryFile);
if (!IndexPtrOrErr) {
logAllUnhandledErrors(IndexPtrOrErr.takeError(), errs(),
"Error loading file '" + SummaryFile + "': ");
return false;
}
std::unique_ptr<ModuleSummaryIndex> Index = std::move(*IndexPtrOrErr);
// First step is collecting the import list.
FunctionImporter::ImportMapTy ImportList;
// If requested, simply import all functions in the index. This is used
// when testing distributed backend handling via the opt tool, when
// we have distributed indexes containing exactly the summaries to import.
if (ImportAllIndex)
ComputeCrossModuleImportForModuleFromIndex(M.getModuleIdentifier(), *Index,
ImportList);
else
ComputeCrossModuleImportForModule(M.getModuleIdentifier(), *Index,
ImportList);
// Conservatively mark all internal values as promoted. This interface is
// only used when doing importing via the function importing pass. The pass
// is only enabled when testing importing via the 'opt' tool, which does
// not do the ThinLink that would normally determine what values to promote.
for (auto &I : *Index) {
for (auto &S : I.second.SummaryList) {
if (GlobalValue::isLocalLinkage(S->linkage()))
S->setLinkage(GlobalValue::ExternalLinkage);
}
}
// Next we need to promote to global scope and rename any local values that
// are potentially exported to other modules.
if (renameModuleForThinLTO(M, *Index, nullptr)) {
errs() << "Error renaming module\n";
return false;
}
// Perform the import now.
auto ModuleLoader = [&M](StringRef Identifier) {
return loadFile(Identifier, M.getContext());
};
FunctionImporter Importer(*Index, ModuleLoader);
Expected<bool> Result = Importer.importFunctions(M, ImportList);
// FIXME: Probably need to propagate Errors through the pass manager.
if (!Result) {
logAllUnhandledErrors(Result.takeError(), errs(),
"Error importing module: ");
return false;
}
return *Result;
}
namespace {
/// Pass that performs cross-module function import provided a summary file.
class FunctionImportLegacyPass : public ModulePass {
public:
/// Pass identification, replacement for typeid
static char ID;
explicit FunctionImportLegacyPass() : ModulePass(ID) {}
/// Specify pass name for debug output
StringRef getPassName() const override { return "Function Importing"; }
bool runOnModule(Module &M) override {
if (skipModule(M))
return false;
return doImportingForModule(M);
}
};
} // end anonymous namespace
PreservedAnalyses FunctionImportPass::run(Module &M,
ModuleAnalysisManager &AM) {
if (!doImportingForModule(M))
return PreservedAnalyses::all();
return PreservedAnalyses::none();
}
char FunctionImportLegacyPass::ID = 0;
INITIALIZE_PASS(FunctionImportLegacyPass, "function-import",
"Summary Based Function Import", false, false)
namespace llvm {
Pass *createFunctionImportPass() {
return new FunctionImportLegacyPass();
}
} // end namespace llvm
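// Example invocations of this pass via opt (paths are placeholders; a sketch
// of the testing flow, not taken from the original source):
//   opt -function-import -summary-file combined.thinlto.bc a.bc -o a.out.bc
// or, when testing distributed backends with a per-module index that
// contains exactly the summaries to import:
//   opt -function-import -import-all-index \
//       -summary-file a.bc.thinlto.bc a.bc -o a.out.bc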
Index: projects/clang700-import/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp (revision 340125)
@@ -1,1749 +1,1750 @@
//===- LoopIdiomRecognize.cpp - Loop idiom recognition --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements an idiom recognizer that transforms simple loops into a
// non-loop form. In cases where this kicks in, it can be a significant
// performance win.
//
// If compiling for code size we avoid idiom recognition if the resulting
// code could be larger than the code for the original loop. One way this could
// happen is if the loop is not removable after idiom recognition due to the
// presence of non-idiom instructions. The initial implementation of the
// heuristics applies to idioms in multi-block loops.
//
//===----------------------------------------------------------------------===//
//
// TODO List:
//
// Future loop memory idioms to recognize:
// memcmp, memmove, strlen, etc.
// Future floating point idioms to recognize in -ffast-math mode:
// fpowi
// Future integer operation idioms to recognize:
// ctpop, ctlz, cttz
//
// Beware that isel's default lowering for ctpop is highly inefficient for
// i64 and larger types when i64 is legal and the value has few bits set. It
// would be good to enhance isel to emit a loop for ctpop in this case.
//
// This could recognize common matrix multiplies and dot product idioms and
// replace them with calls to BLAS (if linked in??).
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "loop-idiom"
STATISTIC(NumMemSet, "Number of memset's formed from loop stores");
STATISTIC(NumMemCpy, "Number of memcpy's formed from loop load+stores");
static cl::opt<bool> UseLIRCodeSizeHeurs(
"use-lir-code-size-heurs",
cl::desc("Use loop idiom recognition code size heuristics when compiling"
"with -Os/-Oz"),
cl::init(true), cl::Hidden);
namespace {
class LoopIdiomRecognize {
Loop *CurLoop = nullptr;
AliasAnalysis *AA;
DominatorTree *DT;
LoopInfo *LI;
ScalarEvolution *SE;
TargetLibraryInfo *TLI;
const TargetTransformInfo *TTI;
const DataLayout *DL;
bool ApplyCodeSizeHeuristics;
public:
explicit LoopIdiomRecognize(AliasAnalysis *AA, DominatorTree *DT,
LoopInfo *LI, ScalarEvolution *SE,
TargetLibraryInfo *TLI,
const TargetTransformInfo *TTI,
const DataLayout *DL)
: AA(AA), DT(DT), LI(LI), SE(SE), TLI(TLI), TTI(TTI), DL(DL) {}
bool runOnLoop(Loop *L);
private:
using StoreList = SmallVector<StoreInst *, 8>;
using StoreListMap = MapVector<Value *, StoreList>;
StoreListMap StoreRefsForMemset;
StoreListMap StoreRefsForMemsetPattern;
StoreList StoreRefsForMemcpy;
bool HasMemset;
bool HasMemsetPattern;
bool HasMemcpy;
/// Return code for isLegalStore()
enum LegalStoreKind {
None = 0,
Memset,
MemsetPattern,
Memcpy,
UnorderedAtomicMemcpy,
DontUse // Dummy retval never to be used. Allows catching errors in retval
// handling.
};
/// \name Countable Loop Idiom Handling
/// @{
bool runOnCountableLoop();
bool runOnLoopBlock(BasicBlock *BB, const SCEV *BECount,
SmallVectorImpl<BasicBlock *> &ExitBlocks);
void collectStores(BasicBlock *BB);
LegalStoreKind isLegalStore(StoreInst *SI);
bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
bool ForMemset);
bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
bool processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
unsigned StoreAlignment, Value *StoredVal,
Instruction *TheStore,
SmallPtrSetImpl<Instruction *> &Stores,
const SCEVAddRecExpr *Ev, const SCEV *BECount,
bool NegStride, bool IsLoopMemset = false);
bool processLoopStoreOfLoopLoad(StoreInst *SI, const SCEV *BECount);
bool avoidLIRForMultiBlockLoop(bool IsMemset = false,
bool IsLoopMemset = false);
/// @}
/// \name Noncountable Loop Idiom Handling
/// @{
bool runOnNoncountableLoop();
bool recognizePopcount();
void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
PHINode *CntPhi, Value *Var);
bool recognizeAndInsertCTLZ();
void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
PHINode *CntPhi, Value *Var, Instruction *DefX,
const DebugLoc &DL, bool ZeroCheck,
bool IsCntPhiUsedOutsideLoop);
/// @}
};
class LoopIdiomRecognizeLegacyPass : public LoopPass {
public:
static char ID;
explicit LoopIdiomRecognizeLegacyPass() : LoopPass(ID) {
initializeLoopIdiomRecognizeLegacyPassPass(
*PassRegistry::getPassRegistry());
}
bool runOnLoop(Loop *L, LPPassManager &LPM) override {
if (skipLoop(L))
return false;
AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LoopInfo *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
TargetLibraryInfo *TLI =
&getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
const TargetTransformInfo *TTI =
&getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
*L->getHeader()->getParent());
const DataLayout *DL = &L->getHeader()->getModule()->getDataLayout();
LoopIdiomRecognize LIR(AA, DT, LI, SE, TLI, TTI, DL);
return LIR.runOnLoop(L);
}
/// This transformation requires natural loop information & requires that
/// loop preheaders be inserted into the CFG.
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<TargetLibraryInfoWrapperPass>();
AU.addRequired<TargetTransformInfoWrapperPass>();
getLoopAnalysisUsage(AU);
}
};
} // end anonymous namespace
char LoopIdiomRecognizeLegacyPass::ID = 0;
PreservedAnalyses LoopIdiomRecognizePass::run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR,
LPMUpdater &) {
const auto *DL = &L.getHeader()->getModule()->getDataLayout();
LoopIdiomRecognize LIR(&AR.AA, &AR.DT, &AR.LI, &AR.SE, &AR.TLI, &AR.TTI, DL);
if (!LIR.runOnLoop(&L))
return PreservedAnalyses::all();
return getLoopPassPreservedAnalyses();
}
INITIALIZE_PASS_BEGIN(LoopIdiomRecognizeLegacyPass, "loop-idiom",
"Recognize loop idioms", false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopIdiomRecognizeLegacyPass, "loop-idiom",
"Recognize loop idioms", false, false)
Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognizeLegacyPass(); }
static void deleteDeadInstruction(Instruction *I) {
I->replaceAllUsesWith(UndefValue::get(I->getType()));
I->eraseFromParent();
}
//===----------------------------------------------------------------------===//
//
// Implementation of LoopIdiomRecognize
//
//===----------------------------------------------------------------------===//
bool LoopIdiomRecognize::runOnLoop(Loop *L) {
CurLoop = L;
// If the loop could not be converted to canonical form, it must have an
// indirectbr in it; just give up.
if (!L->getLoopPreheader())
return false;
// Disable loop idiom recognition if the function's name is a common idiom.
StringRef Name = L->getHeader()->getParent()->getName();
if (Name == "memset" || Name == "memcpy")
return false;
// Determine if code size heuristics need to be applied.
ApplyCodeSizeHeuristics =
L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
HasMemset = TLI->has(LibFunc_memset);
HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
HasMemcpy = TLI->has(LibFunc_memcpy);
if (HasMemset || HasMemsetPattern || HasMemcpy)
if (SE->hasLoopInvariantBackedgeTakenCount(L))
return runOnCountableLoop();
return runOnNoncountableLoop();
}
bool LoopIdiomRecognize::runOnCountableLoop() {
const SCEV *BECount = SE->getBackedgeTakenCount(CurLoop);
assert(!isa<SCEVCouldNotCompute>(BECount) &&
"runOnCountableLoop() called on a loop without a predictable"
"backedge-taken count");
// If this loop executes exactly one time, then it should be peeled, not
// optimized by this pass.
if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
if (BECst->getAPInt() == 0)
return false;
SmallVector<BasicBlock *, 8> ExitBlocks;
CurLoop->getUniqueExitBlocks(ExitBlocks);
LLVM_DEBUG(dbgs() << "loop-idiom Scanning: F["
<< CurLoop->getHeader()->getParent()->getName()
<< "] Loop %" << CurLoop->getHeader()->getName() << "\n");
bool MadeChange = false;
// The following transforms hoist stores/memsets into the loop pre-header.
// Give up if the loop has instructions that may throw.
LoopSafetyInfo SafetyInfo;
computeLoopSafetyInfo(&SafetyInfo, CurLoop);
if (SafetyInfo.MayThrow)
return MadeChange;
// Scan all the blocks in the loop that are not in subloops.
for (auto *BB : CurLoop->getBlocks()) {
// Ignore blocks in subloops.
if (LI->getLoopFor(BB) != CurLoop)
continue;
MadeChange |= runOnLoopBlock(BB, BECount, ExitBlocks);
}
return MadeChange;
}
static APInt getStoreStride(const SCEVAddRecExpr *StoreEv) {
const SCEVConstant *ConstStride = cast<SCEVConstant>(StoreEv->getOperand(1));
return ConstStride->getAPInt();
}
/// getMemSetPatternValue - If a strided store of the specified value is safe to
/// turn into a memset_pattern16, return a ConstantArray of 16 bytes that should
/// be passed in. Otherwise, return null.
///
/// Note that we don't ever attempt to use memset_pattern8 or 4, because these
/// just replicate their input array and then pass on to memset_pattern16.
static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
// If the value isn't a constant, we can't promote it to being in a constant
// array. We could theoretically do a store to an alloca or something, but
// that doesn't seem worthwhile.
Constant *C = dyn_cast<Constant>(V);
if (!C)
return nullptr;
// Only handle simple values that are a power of two bytes in size.
uint64_t Size = DL->getTypeSizeInBits(V->getType());
if (Size == 0 || (Size & 7) || (Size & (Size - 1)))
return nullptr;
// Don't care enough about darwin/ppc to implement this.
if (DL->isBigEndian())
return nullptr;
// Convert to size in bytes.
Size /= 8;
// TODO: If CI is larger than 16-bytes, we can try slicing it in half to see
// if the top and bottom are the same (e.g. for vectors and large integers).
if (Size > 16)
return nullptr;
// If the constant is exactly 16 bytes, just use it.
if (Size == 16)
return C;
// Otherwise, we'll use an array of the constants.
unsigned ArraySize = 16 / Size;
ArrayType *AT = ArrayType::get(V->getType(), ArraySize);
return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}
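// A worked example for the helper above (illustrative): a strided store of
// the i32 constant 0x01020304 is 4 bytes, so ArraySize = 16 / 4 = 4 and the
// returned pattern is the ConstantArray
//   {0x01020304, 0x01020304, 0x01020304, 0x01020304}
// i.e. the value replicated across the 16 bytes memset_pattern16 consumes.
// An i64 constant would be replicated twice; a 16-byte constant is returned
// unchanged.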
LoopIdiomRecognize::LegalStoreKind
LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
// Don't touch volatile stores.
if (SI->isVolatile())
return LegalStoreKind::None;
// We only want simple or unordered-atomic stores.
if (!SI->isUnordered())
return LegalStoreKind::None;
// Don't convert stores of non-integral pointer types to memsets (which store
// integers).
if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
return LegalStoreKind::None;
// Avoid merging nontemporal stores.
if (SI->getMetadata(LLVMContext::MD_nontemporal))
return LegalStoreKind::None;
Value *StoredVal = SI->getValueOperand();
Value *StorePtr = SI->getPointerOperand();
// Reject stores that are so large that they overflow an unsigned.
uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
return LegalStoreKind::None;
// See if the pointer expression is an AddRec like {base,+,1} on the current
// loop, which indicates a strided store. If we have something else, it's a
// random store we can't handle.
const SCEVAddRecExpr *StoreEv =
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
return LegalStoreKind::None;
// Check to see if we have a constant stride.
if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
return LegalStoreKind::None;
// See if the store can be turned into a memset.
// If the stored value is a byte-wise value (like i32 -1), then it may be
// turned into a memset of i8 -1, assuming that all the consecutive bytes
// are stored. A store of i32 0x01020304 can never be turned into a memset,
// but it can be turned into memset_pattern if the target supports it.
Value *SplatValue = isBytewiseValue(StoredVal);
Constant *PatternValue = nullptr;
// Note: memset and memset_pattern on unordered-atomic are not yet supported.
bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
// If we're allowed to form a memset, and the stored value would be
// acceptable for memset, use it.
if (!UnorderedAtomic && HasMemset && SplatValue &&
// Verify that the stored value is loop invariant. If not, we can't
// promote the memset.
CurLoop->isLoopInvariant(SplatValue)) {
// It looks like we can use SplatValue.
return LegalStoreKind::Memset;
} else if (!UnorderedAtomic && HasMemsetPattern &&
// Don't create memset_pattern16s with address spaces.
StorePtr->getType()->getPointerAddressSpace() == 0 &&
(PatternValue = getMemSetPatternValue(StoredVal, DL))) {
// It looks like we can use PatternValue!
return LegalStoreKind::MemsetPattern;
}
// Otherwise, see if the store can be turned into a memcpy.
if (HasMemcpy) {
// Check to see if the stride matches the size of the store. If so, then we
// know that every byte is touched in the loop.
APInt Stride = getStoreStride(StoreEv);
unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
if (StoreSize != Stride && StoreSize != -Stride)
return LegalStoreKind::None;
// The store must be feeding a non-volatile load.
LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
// Only allow non-volatile loads
if (!LI || LI->isVolatile())
return LegalStoreKind::None;
// Only allow simple or unordered-atomic loads
if (!LI->isUnordered())
return LegalStoreKind::None;
// See if the pointer expression is an AddRec like {base,+,1} on the current
// loop, which indicates a strided load. If we have something else, it's a
// random load we can't handle.
const SCEVAddRecExpr *LoadEv =
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
return LegalStoreKind::None;
// The store and load must share the same stride.
if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
return LegalStoreKind::None;
// Success. This store can be converted into a memcpy.
UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
: LegalStoreKind::Memcpy;
}
// This store can't be transformed into a memset/memcpy.
return LegalStoreKind::None;
}
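// Classification examples for isLegalStore() on hypothetical loops, assuming
// the target provides memset/memset_pattern16/memcpy:
//   A[i] = -1;         // i32, stride 4: bytewise 0xFF      -> Memset
//   A[i] = 0x01020304; // not bytewise, 16-byte pattern ok  -> MemsetPattern
//   A[i] = B[i];       // load/store with matching strides  -> Memcpy
//   A[i] = B[i];       // with an unordered-atomic access   -> UnorderedAtomicMemcpy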
void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
StoreRefsForMemset.clear();
StoreRefsForMemsetPattern.clear();
StoreRefsForMemcpy.clear();
for (Instruction &I : *BB) {
StoreInst *SI = dyn_cast<StoreInst>(&I);
if (!SI)
continue;
// Make sure this is a strided store with a constant stride.
switch (isLegalStore(SI)) {
case LegalStoreKind::None:
// Nothing to do
break;
case LegalStoreKind::Memset: {
// Find the base pointer.
Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemset[Ptr].push_back(SI);
} break;
case LegalStoreKind::MemsetPattern: {
// Find the base pointer.
Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemsetPattern[Ptr].push_back(SI);
} break;
case LegalStoreKind::Memcpy:
case LegalStoreKind::UnorderedAtomicMemcpy:
StoreRefsForMemcpy.push_back(SI);
break;
default:
assert(false && "unhandled return value");
break;
}
}
}
/// runOnLoopBlock - Process the specified block, which lives in a counted loop
/// with the specified backedge count. This block is known to be in the current
/// loop and not in any subloops.
bool LoopIdiomRecognize::runOnLoopBlock(
BasicBlock *BB, const SCEV *BECount,
SmallVectorImpl<BasicBlock *> &ExitBlocks) {
// We can only promote stores in this block if they are unconditionally
// executed in the loop. For a block to be unconditionally executed, it has
// to dominate all the exit blocks of the loop. Verify this now.
for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
if (!DT->dominates(BB, ExitBlocks[i]))
return false;
bool MadeChange = false;
// Look for store instructions, which may be optimized to memset/memcpy.
collectStores(BB);
// Look for a single store or sets of stores with a common base, which can be
// optimized into a memset (memset_pattern). The latter most commonly happens
// with structs and hand-unrolled loops.
for (auto &SL : StoreRefsForMemset)
MadeChange |= processLoopStores(SL.second, BECount, true);
for (auto &SL : StoreRefsForMemsetPattern)
MadeChange |= processLoopStores(SL.second, BECount, false);
// Optimize the store into a memcpy, if it feeds a similarly strided load.
for (auto &SI : StoreRefsForMemcpy)
MadeChange |= processLoopStoreOfLoopLoad(SI, BECount);
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
Instruction *Inst = &*I++;
// Look for memset instructions, which may be optimized to a larger memset.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
WeakTrackingVH InstPtr(&*I);
if (!processLoopMemSet(MSI, BECount))
continue;
MadeChange = true;
// If processing the memset invalidated our iterator, start over from the
// top of the block.
if (!InstPtr)
I = BB->begin();
continue;
}
}
return MadeChange;
}
/// processLoopStores - See if these stores can be promoted to a memset.
bool LoopIdiomRecognize::processLoopStores(SmallVectorImpl<StoreInst *> &SL,
const SCEV *BECount,
bool ForMemset) {
// Try to find consecutive stores that can be transformed into memsets.
SetVector<StoreInst *> Heads, Tails;
SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
// Do a quadratic search on all of the given stores and find
// all of the pairs of stores that follow each other.
SmallVector<unsigned, 16> IndexQueue;
for (unsigned i = 0, e = SL.size(); i < e; ++i) {
assert(SL[i]->isSimple() && "Expected only non-volatile stores.");
Value *FirstStoredVal = SL[i]->getValueOperand();
Value *FirstStorePtr = SL[i]->getPointerOperand();
const SCEVAddRecExpr *FirstStoreEv =
cast<SCEVAddRecExpr>(SE->getSCEV(FirstStorePtr));
APInt FirstStride = getStoreStride(FirstStoreEv);
unsigned FirstStoreSize = DL->getTypeStoreSize(SL[i]->getValueOperand()->getType());
// See if we can optimize just this store in isolation.
if (FirstStride == FirstStoreSize || -FirstStride == FirstStoreSize) {
Heads.insert(SL[i]);
continue;
}
Value *FirstSplatValue = nullptr;
Constant *FirstPatternValue = nullptr;
if (ForMemset)
FirstSplatValue = isBytewiseValue(FirstStoredVal);
else
FirstPatternValue = getMemSetPatternValue(FirstStoredVal, DL);
assert((FirstSplatValue || FirstPatternValue) &&
"Expected either splat value or pattern value.");
IndexQueue.clear();
// If a store has multiple consecutive store candidates, search the Stores
// array according to the sequence: from i+1 to e, then from i-1 to 0.
// This is because pairing with the immediately succeeding or preceding
// candidate usually creates the best chance of finding a memset opportunity.
unsigned j = 0;
for (j = i + 1; j < e; ++j)
IndexQueue.push_back(j);
for (j = i; j > 0; --j)
IndexQueue.push_back(j - 1);
for (auto &k : IndexQueue) {
assert(SL[k]->isSimple() && "Expected only non-volatile stores.");
Value *SecondStorePtr = SL[k]->getPointerOperand();
const SCEVAddRecExpr *SecondStoreEv =
cast<SCEVAddRecExpr>(SE->getSCEV(SecondStorePtr));
APInt SecondStride = getStoreStride(SecondStoreEv);
if (FirstStride != SecondStride)
continue;
Value *SecondStoredVal = SL[k]->getValueOperand();
Value *SecondSplatValue = nullptr;
Constant *SecondPatternValue = nullptr;
if (ForMemset)
SecondSplatValue = isBytewiseValue(SecondStoredVal);
else
SecondPatternValue = getMemSetPatternValue(SecondStoredVal, DL);
assert((SecondSplatValue || SecondPatternValue) &&
"Expected either splat value or pattern value.");
if (isConsecutiveAccess(SL[i], SL[k], *DL, *SE, false)) {
if (ForMemset) {
if (FirstSplatValue != SecondSplatValue)
continue;
} else {
if (FirstPatternValue != SecondPatternValue)
continue;
}
Tails.insert(SL[k]);
Heads.insert(SL[i]);
ConsecutiveChain[SL[i]] = SL[k];
break;
}
}
}
// We may run into multiple chains that merge into a single chain. We mark the
// stores that we transformed so that we don't visit the same store twice.
SmallPtrSet<Value *, 16> TransformedStores;
bool Changed = false;
// For stores that start but don't end a link in the chain:
for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
it != e; ++it) {
if (Tails.count(*it))
continue;
// We found a store instr that starts a chain. Now follow the chain and try
// to transform it.
SmallPtrSet<Instruction *, 8> AdjacentStores;
StoreInst *I = *it;
StoreInst *HeadStore = I;
unsigned StoreSize = 0;
// Collect the chain into a list.
while (Tails.count(I) || Heads.count(I)) {
if (TransformedStores.count(I))
break;
AdjacentStores.insert(I);
StoreSize += DL->getTypeStoreSize(I->getValueOperand()->getType());
// Move to the next value in the chain.
I = ConsecutiveChain[I];
}
Value *StoredVal = HeadStore->getValueOperand();
Value *StorePtr = HeadStore->getPointerOperand();
const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
APInt Stride = getStoreStride(StoreEv);
// Check to see if the stride matches the size of the stores. If so, then
// we know that every byte is touched in the loop.
if (StoreSize != Stride && StoreSize != -Stride)
continue;
bool NegStride = StoreSize == -Stride;
if (processLoopStridedStore(StorePtr, StoreSize, HeadStore->getAlignment(),
StoredVal, HeadStore, AdjacentStores, StoreEv,
BECount, NegStride)) {
TransformedStores.insert(AdjacentStores.begin(), AdjacentStores.end());
Changed = true;
}
}
return Changed;
}
/// processLoopMemSet - See if this memset can be promoted to a large memset.
bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
const SCEV *BECount) {
// We can only handle non-volatile memsets with a constant size.
if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
return false;
// If we're not allowed to hack on memset, we fail.
if (!HasMemset)
return false;
Value *Pointer = MSI->getDest();
// See if the pointer expression is an AddRec like {base,+,1} on the current
// loop, which indicates a strided store. If we have something else, it's a
// random store we can't handle.
const SCEVAddRecExpr *Ev = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Pointer));
if (!Ev || Ev->getLoop() != CurLoop || !Ev->isAffine())
return false;
// Reject memsets that are so large that they overflow an unsigned.
uint64_t SizeInBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
if ((SizeInBytes >> 32) != 0)
return false;
// Check to see if the stride matches the size of the memset. If so, then we
// know that every byte is touched in the loop.
const SCEVConstant *ConstStride = dyn_cast<SCEVConstant>(Ev->getOperand(1));
if (!ConstStride)
return false;
APInt Stride = ConstStride->getAPInt();
if (SizeInBytes != Stride && SizeInBytes != -Stride)
return false;
// Verify that the memset value is loop invariant. If not, we can't promote
// the memset.
Value *SplatValue = MSI->getValue();
if (!SplatValue || !CurLoop->isLoopInvariant(SplatValue))
return false;
SmallPtrSet<Instruction *, 1> MSIs;
MSIs.insert(MSI);
bool NegStride = SizeInBytes == -Stride;
return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
MSI->getDestAlignment(), SplatValue, MSI, MSIs,
Ev, BECount, NegStride, /*IsLoopMemset=*/true);
}
/// mayLoopAccessLocation - Return true if the specified loop might access the
/// specified pointer location, which is a loop-strided access. The 'Access'
/// argument specifies what the verboten forms of access are (read or write).
static bool
mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
const SCEV *BECount, unsigned StoreSize,
AliasAnalysis &AA,
SmallPtrSetImpl<Instruction *> &IgnoredStores) {
// Get the location that may be stored across the loop. Since the access is
// strided positively through memory, we say that the modified location starts
// at the pointer and has infinite size.
uint64_t AccessSize = MemoryLocation::UnknownSize;
// If the loop iterates a fixed number of times, we can refine the access size
// to be exactly the size of the memset, which is (BECount+1)*StoreSize
if (const SCEVConstant *BECst = dyn_cast<SCEVConstant>(BECount))
AccessSize = (BECst->getValue()->getZExtValue() + 1) * StoreSize;
// TODO: For this to be really effective, we have to dive into the pointer
// operand in the store. A store to &A[i] of size 100 will always return
// MayAlias with a store to &A[100]; we need StoreLoc to be "A" with a size
// of 100, which will then no-alias a store to &A[100].
MemoryLocation StoreLoc(Ptr, AccessSize);
for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
++BI)
for (Instruction &I : **BI)
if (IgnoredStores.count(&I) == 0 &&
isModOrRefSet(
intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access)))
return true;
return false;
}
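// Worked example for the refinement above (editorial note): with a constant
// backedge-taken count of 99 and a StoreSize of 4, the loop touches
// (99 + 1) * 4 = 400 bytes, so the queried location shrinks from
// MemoryLocation::UnknownSize to exactly 400 bytes, giving alias analysis a
// bounded region to disprove.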
// If we have a negative stride, Start refers to the end of the memory location
// we're trying to memset. Therefore, we need to recompute the base pointer,
// which is just Start - BECount*Size.
static const SCEV *getStartForNegStride(const SCEV *Start, const SCEV *BECount,
Type *IntPtr, unsigned StoreSize,
ScalarEvolution *SE) {
const SCEV *Index = SE->getTruncateOrZeroExtend(BECount, IntPtr);
if (StoreSize != 1)
Index = SE->getMulExpr(Index, SE->getConstant(IntPtr, StoreSize),
SCEV::FlagNUW);
return SE->getMinusSCEV(Start, Index);
}
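// Editorial example: for a loop that stores A[n-1], A[n-2], ..., A[0] one
// byte at a time, the AddRec start is &A[n-1]. With BECount = n-1 and
// StoreSize = 1, the subtraction above yields &A[n-1] - (n-1) = &A[0],
// the lowest address of the region, which is what memset/memcpy expect.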
/// Compute the number of bytes as a SCEV from the backedge taken count.
///
/// This also maps the SCEV into the provided type and tries to handle the
/// computation in a way that will fold cleanly.
static const SCEV *getNumBytes(const SCEV *BECount, Type *IntPtr,
unsigned StoreSize, Loop *CurLoop,
const DataLayout *DL, ScalarEvolution *SE) {
const SCEV *NumBytesS;
// The # stored bytes is (BECount+1)*Size. Expand the trip count out to
// pointer size if it isn't already.
//
// If we're going to need to zero extend the BE count, check if we can add
// one to it prior to zero extending without overflow. Provided this is safe,
// it allows better simplification of the +1.
if (DL->getTypeSizeInBits(BECount->getType()) <
DL->getTypeSizeInBits(IntPtr) &&
SE->isLoopEntryGuardedByCond(
CurLoop, ICmpInst::ICMP_NE, BECount,
SE->getNegativeSCEV(SE->getOne(BECount->getType())))) {
NumBytesS = SE->getZeroExtendExpr(
SE->getAddExpr(BECount, SE->getOne(BECount->getType()), SCEV::FlagNUW),
IntPtr);
} else {
NumBytesS = SE->getAddExpr(SE->getTruncateOrZeroExtend(BECount, IntPtr),
SE->getOne(IntPtr), SCEV::FlagNUW);
}
// And scale it based on the store size.
if (StoreSize != 1) {
NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtr, StoreSize),
SCEV::FlagNUW);
}
return NumBytesS;
}
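// Editorial example of the widening trick above: if BECount is an i32 %n
// and IntPtr is i64, proving the loop entry guard %n != -1 lets us emit
// zext(%n + 1) rather than zext(%n) + 1; the former keeps the +1 in 32 bits
// where it can fold with existing induction arithmetic, while the latter
// pins an i64 add behind the extension.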
/// processLoopStridedStore - We see a strided store of some value. If we can
/// transform this into a memset or memset_pattern in the loop preheader, do so.
bool LoopIdiomRecognize::processLoopStridedStore(
Value *DestPtr, unsigned StoreSize, unsigned StoreAlignment,
Value *StoredVal, Instruction *TheStore,
SmallPtrSetImpl<Instruction *> &Stores, const SCEVAddRecExpr *Ev,
const SCEV *BECount, bool NegStride, bool IsLoopMemset) {
Value *SplatValue = isBytewiseValue(StoredVal);
Constant *PatternValue = nullptr;
if (!SplatValue)
PatternValue = getMemSetPatternValue(StoredVal, DL);
assert((SplatValue || PatternValue) &&
"Expected either splat value or pattern value.");
// The trip count of the loop and the base pointer of the addrec SCEV are
// guaranteed to be loop invariant, which means that they should dominate the
// header. This allows us to insert code for them in the preheader.
unsigned DestAS = DestPtr->getType()->getPointerAddressSpace();
BasicBlock *Preheader = CurLoop->getLoopPreheader();
IRBuilder<> Builder(Preheader->getTerminator());
SCEVExpander Expander(*SE, *DL, "loop-idiom");
Type *DestInt8PtrTy = Builder.getInt8PtrTy(DestAS);
Type *IntPtr = Builder.getIntPtrTy(*DL, DestAS);
const SCEV *Start = Ev->getStart();
// Handle negative strided loops.
if (NegStride)
Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
// TODO: ideally we should still be able to generate memset if SCEV expander
// is taught to generate the dependencies at the latest point.
if (!isSafeToExpand(Start, *SE))
return false;
// Okay, we have a strided store "p[i]" of a splattable value. We can turn
// this into a memset in the loop preheader now if we want. However, this
// would be unsafe to do if there is anything else in the loop that may read
// or write to the aliased location. Check for any overlap by generating the
// base pointer and checking the region.
Value *BasePtr =
Expander.expandCodeFor(Start, DestInt8PtrTy, Preheader->getTerminator());
if (mayLoopAccessLocation(BasePtr, ModRefInfo::ModRef, CurLoop, BECount,
StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
RecursivelyDeleteTriviallyDeadInstructions(BasePtr, TLI);
return false;
}
if (avoidLIRForMultiBlockLoop(/*IsMemset=*/true, IsLoopMemset))
return false;
// Okay, everything looks good, insert the memset.
const SCEV *NumBytesS =
getNumBytes(BECount, IntPtr, StoreSize, CurLoop, DL, SE);
// TODO: ideally we should still be able to generate memset if SCEV expander
// is taught to generate the dependencies at the latest point.
if (!isSafeToExpand(NumBytesS, *SE))
return false;
Value *NumBytes =
Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
CallInst *NewCall;
if (SplatValue) {
NewCall =
Builder.CreateMemSet(BasePtr, SplatValue, NumBytes, StoreAlignment);
} else {
// Everything is emitted in the default address space
Type *Int8PtrTy = DestInt8PtrTy;
Module *M = TheStore->getModule();
+ StringRef FuncName = "memset_pattern16";
Value *MSP =
- M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
+ M->getOrInsertFunction(FuncName, Builder.getVoidTy(),
Int8PtrTy, Int8PtrTy, IntPtr);
- inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
+ inferLibFuncAttributes(M, FuncName, *TLI);
// Otherwise we should form a memset_pattern16. PatternValue is known to be
// a constant array of 16 bytes. Plop the value into a mergeable global.
GlobalVariable *GV = new GlobalVariable(*M, PatternValue->getType(), true,
GlobalValue::PrivateLinkage,
PatternValue, ".memset_pattern");
GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global); // Ok to merge these.
GV->setAlignment(16);
Value *PatternPtr = ConstantExpr::getBitCast(GV, Int8PtrTy);
NewCall = Builder.CreateCall(MSP, {BasePtr, PatternPtr, NumBytes});
}
LLVM_DEBUG(dbgs() << " Formed memset: " << *NewCall << "\n"
<< " from store to: " << *Ev << " at: " << *TheStore
<< "\n");
NewCall->setDebugLoc(TheStore->getDebugLoc());
// Okay, the memset has been formed. Zap the original store and anything that
// feeds into it.
for (auto *I : Stores)
deleteDeadInstruction(I);
++NumMemSet;
return true;
}
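// Editorial sketch of the pattern path (hypothetical values): a loop like
// \code
//   for (int i = 0; i < n; ++i)
//     A[i] = 0x12345678; // not a splat of one byte, so plain memset fails
// \endcode
// becomes, on targets where HasMemsetPattern holds, roughly
// \code
//   memset_pattern16((char *)&A[0], Pattern, (size_t)n * 4);
// \endcode
// where Pattern points at a private 16-byte global holding the constant
// replicated four times.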
/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
/// for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
const SCEV *BECount) {
assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");
Value *StorePtr = SI->getPointerOperand();
const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
APInt Stride = getStoreStride(StoreEv);
unsigned StoreSize = DL->getTypeStoreSize(SI->getValueOperand()->getType());
bool NegStride = StoreSize == -Stride;
// The store must be feeding a non-volatile load.
LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");
// See if the pointer expression is an AddRec like {base,+,1} on the current
// loop, which indicates a strided load. If we have something else, it's a
// random load we can't handle.
const SCEVAddRecExpr *LoadEv =
cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
// The trip count of the loop and the base pointer of the addrec SCEV are
// guaranteed to be loop invariant, which means that they should dominate the
// header. This allows us to insert code for them in the preheader.
BasicBlock *Preheader = CurLoop->getLoopPreheader();
IRBuilder<> Builder(Preheader->getTerminator());
SCEVExpander Expander(*SE, *DL, "loop-idiom");
const SCEV *StrStart = StoreEv->getStart();
unsigned StrAS = SI->getPointerAddressSpace();
Type *IntPtrTy = Builder.getIntPtrTy(*DL, StrAS);
// Handle negative strided loops.
if (NegStride)
StrStart = getStartForNegStride(StrStart, BECount, IntPtrTy, StoreSize, SE);
// Okay, we have a strided store "p[i]" of a loaded value. We can turn
// this into a memcpy in the loop preheader now if we want. However, this
// would be unsafe to do if there is anything else in the loop that may read
// or write the memory region we're storing to. This includes the load that
// feeds the stores. Check for an alias by generating the base address and
// checking everything.
Value *StoreBasePtr = Expander.expandCodeFor(
StrStart, Builder.getInt8PtrTy(StrAS), Preheader->getTerminator());
SmallPtrSet<Instruction *, 1> Stores;
Stores.insert(SI);
if (mayLoopAccessLocation(StoreBasePtr, ModRefInfo::ModRef, CurLoop, BECount,
StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
return false;
}
const SCEV *LdStart = LoadEv->getStart();
unsigned LdAS = LI->getPointerAddressSpace();
// Handle negative strided loops.
if (NegStride)
LdStart = getStartForNegStride(LdStart, BECount, IntPtrTy, StoreSize, SE);
// For a memcpy, we have to make sure that the input array is not being
// mutated by the loop.
Value *LoadBasePtr = Expander.expandCodeFor(
LdStart, Builder.getInt8PtrTy(LdAS), Preheader->getTerminator());
if (mayLoopAccessLocation(LoadBasePtr, ModRefInfo::Mod, CurLoop, BECount,
StoreSize, *AA, Stores)) {
Expander.clear();
// If we generated new code for the base pointer, clean up.
RecursivelyDeleteTriviallyDeadInstructions(LoadBasePtr, TLI);
RecursivelyDeleteTriviallyDeadInstructions(StoreBasePtr, TLI);
return false;
}
if (avoidLIRForMultiBlockLoop())
return false;
// Okay, everything is safe, we can transform this!
const SCEV *NumBytesS =
getNumBytes(BECount, IntPtrTy, StoreSize, CurLoop, DL, SE);
Value *NumBytes =
Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
CallInst *NewCall = nullptr;
// Check whether to generate an unordered atomic memcpy:
// If either the load or the store is atomic, then it must necessarily be
// unordered by the previous checks.
if (!SI->isAtomic() && !LI->isAtomic())
NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
LoadBasePtr, LI->getAlignment(), NumBytes);
else {
// We cannot allow unaligned ops for unordered load/store, so reject
// anything where the alignment isn't at least the element size.
unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
if (Align < StoreSize)
return false;
// If the element.atomic memcpy is not lowered into explicit
// loads/stores later, then it will be lowered into an element-size
// specific lib call. If the lib call doesn't exist for our store size, then
// we shouldn't generate the memcpy.
if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
return false;
// Create the call.
// Note that unordered atomic loads/stores are *required* by the spec to
// have an alignment but non-atomic loads/stores may not.
NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
StoreBasePtr, SI->getAlignment(), LoadBasePtr, LI->getAlignment(),
NumBytes, StoreSize);
}
NewCall->setDebugLoc(SI->getDebugLoc());
LLVM_DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
<< " from load ptr=" << *LoadEv << " at: " << *LI << "\n"
<< " from store ptr=" << *StoreEv << " at: " << *SI
<< "\n");
// Okay, the memcpy has been formed. Zap the original store and anything that
// feeds into it.
deleteDeadInstruction(SI);
++NumMemCpy;
return true;
}
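// Editorial sketch of the end result: for the canonical
// "for (i) A[i] = B[i];" loop with 4-byte elements and trip count n, the
// call constructed above behaves like
// \code
//   memcpy(&A[0], &B[0], (size_t)n * 4);
// \endcode
// or, when the accesses are unordered-atomic, like the element-wise
// llvm.memcpy.element.unordered.atomic intrinsic with element size 4.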
// When compiling for codesize we avoid idiom recognition for a multi-block loop
// unless it is a loop_memset idiom or a memset/memcpy idiom in a nested loop.
//
bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
bool IsLoopMemset) {
if (ApplyCodeSizeHeuristics && CurLoop->getNumBlocks() > 1) {
if (!CurLoop->getParentLoop() && (!IsMemset || !IsLoopMemset)) {
LLVM_DEBUG(dbgs() << " " << CurLoop->getHeader()->getParent()->getName()
<< " : LIR " << (IsMemset ? "Memset" : "Memcpy")
<< " avoided: multi-block top-level loop\n");
return true;
}
}
return false;
}
bool LoopIdiomRecognize::runOnNoncountableLoop() {
return recognizePopcount() || recognizeAndInsertCTLZ();
}
/// Check if the given conditional branch is based on the comparison between
/// a variable and zero, and if the variable is non-zero, the control yields to
/// the loop entry. If the branch matches the behavior, the variable involved
/// in the comparison is returned. This function will be called to see if the
/// precondition and postcondition of the loop are in desirable form.
static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
if (!BI || !BI->isConditional())
return nullptr;
ICmpInst *Cond = dyn_cast<ICmpInst>(BI->getCondition());
if (!Cond)
return nullptr;
ConstantInt *CmpZero = dyn_cast<ConstantInt>(Cond->getOperand(1));
if (!CmpZero || !CmpZero->isZero())
return nullptr;
ICmpInst::Predicate Pred = Cond->getPredicate();
if ((Pred == ICmpInst::ICMP_NE && BI->getSuccessor(0) == LoopEntry) ||
(Pred == ICmpInst::ICMP_EQ && BI->getSuccessor(1) == LoopEntry))
return Cond->getOperand(0);
return nullptr;
}
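// Editorial example of a branch this matches (LLVM IR sketch):
// \code
//   %cmp = icmp ne i32 %x, 0
//   br i1 %cmp, label %loop.entry, label %loop.exit
// \endcode
// Here matchCondition returns %x; the ICMP_EQ form with the successors
// swapped is accepted as well.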
// Check if the recurrence variable `VarX` is in the right form to create
// the idiom. Returns the value coerced to a PHINode if so.
static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
BasicBlock *LoopEntry) {
auto *PhiX = dyn_cast<PHINode>(VarX);
if (PhiX && PhiX->getParent() == LoopEntry &&
(PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
return PhiX;
return nullptr;
}
/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction counting the population bit.
/// 2) \p CntPhi is set to the corresponding phi node.
/// 3) \p Var is set to the value whose population bits are being counted.
///
/// The core idiom we are trying to detect is:
/// \code
/// if (x0 != 0)
/// goto loop-exit // the precondition of the loop
/// cnt0 = init-val;
/// do {
/// x1 = phi (x0, x2);
/// cnt1 = phi(cnt0, cnt2);
///
/// cnt2 = cnt1 + 1;
/// ...
/// x2 = x1 & (x1 - 1);
/// ...
/// } while(x != 0);
///
/// loop-exit:
/// \endcode
static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
Instruction *&CntInst, PHINode *&CntPhi,
Value *&Var) {
// step 1: Check to see if the loop-back branch matches this pattern:
// "if (a!=0) goto loop-entry".
BasicBlock *LoopEntry;
Instruction *DefX2, *CountInst;
Value *VarX1, *VarX0;
PHINode *PhiX, *CountPhi;
DefX2 = CountInst = nullptr;
VarX1 = VarX0 = nullptr;
PhiX = CountPhi = nullptr;
LoopEntry = *(CurLoop->block_begin());
// step 1: Check if the loop-back branch is in desirable form.
{
if (Value *T = matchCondition(
dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
DefX2 = dyn_cast<Instruction>(T);
else
return false;
}
// step 2: detect instructions corresponding to "x2 = x1 & (x1 - 1)"
{
if (!DefX2 || DefX2->getOpcode() != Instruction::And)
return false;
BinaryOperator *SubOneOp;
if ((SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(0))))
VarX1 = DefX2->getOperand(1);
else {
VarX1 = DefX2->getOperand(0);
SubOneOp = dyn_cast<BinaryOperator>(DefX2->getOperand(1));
}
if (!SubOneOp || SubOneOp->getOperand(0) != VarX1)
return false;
ConstantInt *Dec = dyn_cast<ConstantInt>(SubOneOp->getOperand(1));
if (!Dec ||
!((SubOneOp->getOpcode() == Instruction::Sub && Dec->isOne()) ||
(SubOneOp->getOpcode() == Instruction::Add &&
Dec->isMinusOne()))) {
return false;
}
}
// step 3: Check the recurrence of variable X
PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
if (!PhiX)
return false;
// step 4: Find the instruction which counts the population: cnt2 = cnt1 + 1
{
CountInst = nullptr;
for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
IterE = LoopEntry->end();
Iter != IterE; Iter++) {
Instruction *Inst = &*Iter;
if (Inst->getOpcode() != Instruction::Add)
continue;
ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
if (!Inc || !Inc->isOne())
continue;
PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
if (!Phi)
continue;
// Check if the result of the instruction is live out of the loop.
bool LiveOutLoop = false;
for (User *U : Inst->users()) {
if ((cast<Instruction>(U))->getParent() != LoopEntry) {
LiveOutLoop = true;
break;
}
}
if (LiveOutLoop) {
CountInst = Inst;
CountPhi = Phi;
break;
}
}
if (!CountInst)
return false;
}
// step 5: check if the precondition is in this form:
// "if (x != 0) goto loop-head ; else goto somewhere-we-don't-care;"
{
auto *PreCondBr = dyn_cast<BranchInst>(PreCondBB->getTerminator());
Value *T = matchCondition(PreCondBr, CurLoop->getLoopPreheader());
if (T != PhiX->getOperand(0) && T != PhiX->getOperand(1))
return false;
CntInst = CountInst;
CntPhi = CountPhi;
Var = T;
}
return true;
}
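// Editorial note: at the C level the detected idiom is the classic
// Kernighan popcount loop, e.g.
// \code
//   int cnt = 0;
//   if (x)
//     do {
//       cnt++;
//       x &= x - 1; // clears the lowest set bit each iteration
//     } while (x);
// \endcode
// The "x & (x - 1)" step is what steps 2 and 3 above pattern-match.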
/// Return true if the idiom is detected in the loop.
///
/// Additionally:
/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
/// or nullptr if there is no such.
/// 2) \p CntPhi is set to the corresponding phi node
/// or nullptr if there is no such.
/// 3) \p Var is set to the value whose CTLZ could be used.
/// 4) \p DefX is set to the instruction calculating Loop exit condition.
///
/// The core idiom we are trying to detect is:
/// \code
/// if (x0 == 0)
/// goto loop-exit // the precondition of the loop
/// cnt0 = init-val;
/// do {
/// x = phi (x0, x.next); //PhiX
/// cnt = phi(cnt0, cnt.next);
///
/// cnt.next = cnt + 1;
/// ...
/// x.next = x >> 1; // DefX
/// ...
/// } while(x.next != 0);
///
/// loop-exit:
/// \endcode
static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
Instruction *&CntInst, PHINode *&CntPhi,
Instruction *&DefX) {
BasicBlock *LoopEntry;
Value *VarX = nullptr;
DefX = nullptr;
PhiX = nullptr;
CntInst = nullptr;
CntPhi = nullptr;
LoopEntry = *(CurLoop->block_begin());
// step 1: Check if the loop-back branch is in desirable form.
if (Value *T = matchCondition(
dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
DefX = dyn_cast<Instruction>(T);
else
return false;
// step 2: detect instructions corresponding to "x.next = x >> 1"
if (!DefX || (DefX->getOpcode() != Instruction::AShr &&
DefX->getOpcode() != Instruction::LShr))
return false;
ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1));
if (!Shft || !Shft->isOne())
return false;
VarX = DefX->getOperand(0);
// step 3: Check the recurrence of variable X
PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
if (!PhiX)
return false;
// step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
// TODO: We can skip this step. If the loop trip count is known (CTLZ),
// then all uses of "cnt.next" could be optimized to the trip count
// plus "cnt0". Currently it is not optimized.
// This step could be used to detect POPCNT instruction:
// cnt.next = cnt + (x.next & 1)
for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
IterE = LoopEntry->end();
Iter != IterE; Iter++) {
Instruction *Inst = &*Iter;
if (Inst->getOpcode() != Instruction::Add)
continue;
ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
if (!Inc || !Inc->isOne())
continue;
PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
if (!Phi)
continue;
CntInst = Inst;
CntPhi = Phi;
break;
}
if (!CntInst)
return false;
return true;
}
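// Editorial note: at the C level the detected idiom looks like
// \code
//   int cnt = 0;
//   do {
//     cnt++;
//     x >>= 1;
//   } while (x);
// \endcode
// For nonzero x this leaves cnt == BitWidth(x) - CTLZ(x), which is the
// closed form transformLoopToCountable substitutes.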
/// Recognize the CTLZ idiom in a non-countable loop and convert the loop
/// to a countable one (with a CTLZ trip count).
/// Returns true if CTLZ was inserted as a new trip count; otherwise, false.
bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
// Give up if the loop has multiple blocks or multiple backedges.
if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
return false;
Instruction *CntInst, *DefX;
PHINode *CntPhi, *PhiX;
if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
return false;
bool IsCntPhiUsedOutsideLoop = false;
for (User *U : CntPhi->users())
if (!CurLoop->contains(cast<Instruction>(U))) {
IsCntPhiUsedOutsideLoop = true;
break;
}
bool IsCntInstUsedOutsideLoop = false;
for (User *U : CntInst->users())
if (!CurLoop->contains(cast<Instruction>(U))) {
IsCntInstUsedOutsideLoop = true;
break;
}
// If both CntInst and CntPhi are used outside the loop the profitability
// is questionable.
if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
return false;
// For some CPUs the result of the CTLZ(X) intrinsic is undefined
// when X is 0. If we cannot guarantee X != 0, we need to check for this
// when expanding.
bool ZeroCheck = false;
// It is safe to assume the preheader exists, as it was checked in the
// parent function runOnLoop.
BasicBlock *PH = CurLoop->getLoopPreheader();
Value *InitX = PhiX->getIncomingValueForBlock(PH);
// Make sure the initial value can't be negative otherwise the ashr in the
// loop might never reach zero which would make the loop infinite.
if (DefX->getOpcode() == Instruction::AShr && !isKnownNonNegative(InitX, *DL))
return false;
// If we are using the count instruction outside the loop, make sure we
// have a zero check as a precondition. Without the check the loop would run
// one iteration before any check of the input value. This means 0 and 1
// would have identical behavior in the original loop, and thus the
// transformation is only safe when the precondition rules out a zero input.
if (!IsCntPhiUsedOutsideLoop) {
auto *PreCondBB = PH->getSinglePredecessor();
if (!PreCondBB)
return false;
auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
if (!PreCondBI)
return false;
if (matchCondition(PreCondBI, PH) != InitX)
return false;
ZeroCheck = true;
}
// Check if CTLZ intrinsic is profitable. Assume it is always profitable
// if we delete the loop (the loop has only 6 instructions):
// %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
// %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
// %shr = ashr %n.addr.0, 1
// %tobool = icmp eq %shr, 0
// %inc = add nsw %i.0, 1
// br i1 %tobool
const Value *Args[] =
{InitX, ZeroCheck ? ConstantInt::getTrue(InitX->getContext())
: ConstantInt::getFalse(InitX->getContext())};
if (CurLoop->getHeader()->size() != 6 &&
TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
TargetTransformInfo::TCC_Basic)
return false;
transformLoopToCountable(PH, CntInst, CntPhi, InitX, DefX,
DefX->getDebugLoc(), ZeroCheck,
IsCntPhiUsedOutsideLoop);
return true;
}
/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
/// function call, and returns true; otherwise, returns false.
bool LoopIdiomRecognize::recognizePopcount() {
if (TTI->getPopcntSupport(32) != TargetTransformInfo::PSK_FastHardware)
return false;
// Population counting is usually done with a few arithmetic instructions.
// Such instructions can easily be "absorbed" by vacant slots in a
// non-compact loop. Therefore, recognizing the popcount idiom only makes
// sense in a compact loop.
// Give up if the loop has multiple blocks or multiple backedges.
if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
return false;
BasicBlock *LoopBody = *(CurLoop->block_begin());
if (LoopBody->size() >= 20) {
// The loop is too big, bail out.
return false;
}
// It should have a preheader containing nothing but an unconditional branch.
BasicBlock *PH = CurLoop->getLoopPreheader();
if (!PH || &PH->front() != PH->getTerminator())
return false;
auto *EntryBI = dyn_cast<BranchInst>(PH->getTerminator());
if (!EntryBI || EntryBI->isConditional())
return false;
// It should have a precondition block where the generated popcount intrinsic
// function can be inserted.
auto *PreCondBB = PH->getSinglePredecessor();
if (!PreCondBB)
return false;
auto *PreCondBI = dyn_cast<BranchInst>(PreCondBB->getTerminator());
if (!PreCondBI || PreCondBI->isUnconditional())
return false;
Instruction *CntInst;
PHINode *CntPhi;
Value *Val;
if (!detectPopcountIdiom(CurLoop, PreCondBB, CntInst, CntPhi, Val))
return false;
transformLoopToPopcount(PreCondBB, CntInst, CntPhi, Val);
return true;
}
static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
const DebugLoc &DL) {
Value *Ops[] = {Val};
Type *Tys[] = {Val->getType()};
Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctpop, Tys);
CallInst *CI = IRBuilder.CreateCall(Func, Ops);
CI->setDebugLoc(DL);
return CI;
}
static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
const DebugLoc &DL, bool ZeroCheck) {
Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
Type *Tys[] = {Val->getType()};
Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
CallInst *CI = IRBuilder.CreateCall(Func, Ops);
CI->setDebugLoc(DL);
return CI;
}
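// Editorial IR sketch of what the two helpers above emit for a 32-bit %x:
// \code
//   %pop = call i32 @llvm.ctpop.i32(i32 %x)
//   %clz = call i32 @llvm.ctlz.i32(i32 %x, i1 true)
// \endcode
// The i1 operand is the is-zero-undef flag; it is set to true only when a
// preceding zero check makes a zero input impossible.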
/// Transform the following loop:
/// loop:
/// CntPhi = PHI [Cnt0, CntInst]
/// PhiX = PHI [InitX, DefX]
/// CntInst = CntPhi + 1
/// DefX = PhiX >> 1
/// LOOP_BODY
/// Br: loop if (DefX != 0)
/// Use(CntPhi) or Use(CntInst)
///
/// Into:
/// If CntPhi used outside the loop:
/// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
/// Count = CountPrev + 1
/// else
/// Count = BitWidth(InitX) - CTLZ(InitX)
/// loop:
/// CntPhi = PHI [Cnt0, CntInst]
/// PhiX = PHI [InitX, DefX]
/// PhiCount = PHI [Count, Dec]
/// CntInst = CntPhi + 1
/// DefX = PhiX >> 1
/// Dec = PhiCount - 1
/// LOOP_BODY
/// Br: loop if (Dec != 0)
/// Use(CountPrev + Cnt0) // Use(CntPhi)
/// or
/// Use(Count + Cnt0) // Use(CntInst)
///
/// If LOOP_BODY is empty the loop will be deleted.
/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
void LoopIdiomRecognize::transformLoopToCountable(
BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
Instruction *DefX, const DebugLoc &DL, bool ZeroCheck,
bool IsCntPhiUsedOutsideLoop) {
BranchInst *PreheaderBr = cast<BranchInst>(Preheader->getTerminator());
// Step 1: Insert the CTLZ instruction at the end of the preheader block
// Count = BitWidth - CTLZ(InitX);
// If there are uses of CntPhi create:
// CountPrev = BitWidth - CTLZ(InitX >> 1);
IRBuilder<> Builder(PreheaderBr);
Builder.SetCurrentDebugLocation(DL);
Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;
if (IsCntPhiUsedOutsideLoop) {
if (DefX->getOpcode() == Instruction::AShr)
InitXNext =
Builder.CreateAShr(InitX, ConstantInt::get(InitX->getType(), 1));
else if (DefX->getOpcode() == Instruction::LShr)
InitXNext =
Builder.CreateLShr(InitX, ConstantInt::get(InitX->getType(), 1));
else
llvm_unreachable("Unexpected opcode!");
} else
InitXNext = InitX;
CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
Count = Builder.CreateSub(
ConstantInt::get(CTLZ->getType(),
CTLZ->getType()->getIntegerBitWidth()),
CTLZ);
if (IsCntPhiUsedOutsideLoop) {
CountPrev = Count;
Count = Builder.CreateAdd(
CountPrev,
ConstantInt::get(CountPrev->getType(), 1));
}
if (IsCntPhiUsedOutsideLoop)
NewCount = Builder.CreateZExtOrTrunc(CountPrev,
cast<IntegerType>(CntInst->getType()));
else
NewCount = Builder.CreateZExtOrTrunc(Count,
cast<IntegerType>(CntInst->getType()));
// If the CTLZ counter's initial value is not zero, insert Add Inst.
Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
if (!InitConst || !InitConst->isZero())
NewCount = Builder.CreateAdd(NewCount, CntInitVal);
// Step 2: Insert new IV and loop condition:
// loop:
// ...
// PhiCount = PHI [Count, Dec]
// ...
// Dec = PhiCount - 1
// ...
// Br: loop if (Dec != 0)
BasicBlock *Body = *(CurLoop->block_begin());
auto *LbBr = cast<BranchInst>(Body->getTerminator());
ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
Type *Ty = Count->getType();
PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
Builder.SetInsertPoint(LbCond);
Instruction *TcDec = cast<Instruction>(
Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
"tcdec", false, true));
TcPhi->addIncoming(Count, Preheader);
TcPhi->addIncoming(TcDec, Body);
CmpInst::Predicate Pred =
(LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
LbCond->setPredicate(Pred);
LbCond->setOperand(0, TcDec);
LbCond->setOperand(1, ConstantInt::get(Ty, 0));
// Step 3: All the references to the original counter outside
// the loop are replaced with the NewCount -- the value returned from
// __builtin_ctlz(x).
if (IsCntPhiUsedOutsideLoop)
CntPhi->replaceUsesOutsideBlock(NewCount, Body);
else
CntInst->replaceUsesOutsideBlock(NewCount, Body);
// Step 4: Forget the "non-computable" trip-count SCEV associated with the
// loop. The loop would otherwise not be deleted even if it becomes empty.
SE->forgetLoop(CurLoop);
}
void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
Instruction *CntInst,
PHINode *CntPhi, Value *Var) {
BasicBlock *PreHead = CurLoop->getLoopPreheader();
auto *PreCondBr = cast<BranchInst>(PreCondBB->getTerminator());
const DebugLoc &DL = CntInst->getDebugLoc();
// Assuming before transformation, the loop is following:
// if (x) // the precondition
// do { cnt++; x &= x - 1; } while(x);
// Step 1: Insert the ctpop instruction at the end of the precondition block
IRBuilder<> Builder(PreCondBr);
Value *PopCnt, *PopCntZext, *NewCount, *TripCnt;
{
PopCnt = createPopcntIntrinsic(Builder, Var, DL);
NewCount = PopCntZext =
Builder.CreateZExtOrTrunc(PopCnt, cast<IntegerType>(CntPhi->getType()));
if (NewCount != PopCnt)
(cast<Instruction>(NewCount))->setDebugLoc(DL);
// TripCnt is exactly the number of iterations the loop has
TripCnt = NewCount;
// If the population counter's initial value is not zero, insert Add Inst.
Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead);
ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
if (!InitConst || !InitConst->isZero()) {
NewCount = Builder.CreateAdd(NewCount, CntInitVal);
(cast<Instruction>(NewCount))->setDebugLoc(DL);
}
}
// Step 2: Replace the precondition from "if (x == 0) goto loop-exit" to
// "if (NewCount == 0) loop-exit". Without this change, the intrinsic
// function would be partial dead code, and downstream passes will drag
// it back from the precondition block to the preheader.
{
ICmpInst *PreCond = cast<ICmpInst>(PreCondBr->getCondition());
Value *Opnd0 = PopCntZext;
Value *Opnd1 = ConstantInt::get(PopCntZext->getType(), 0);
if (PreCond->getOperand(0) != Var)
std::swap(Opnd0, Opnd1);
ICmpInst *NewPreCond = cast<ICmpInst>(
Builder.CreateICmp(PreCond->getPredicate(), Opnd0, Opnd1));
PreCondBr->setCondition(NewPreCond);
RecursivelyDeleteTriviallyDeadInstructions(PreCond, TLI);
}
// Step 3: Note that the population count is exactly the trip count of the
// loop in question, which enables us to convert the loop from a
// noncountable loop into a countable one. The benefit is twofold:
//
// - If the loop only counts population, the entire loop becomes dead after
// the transformation. It is a lot easier to prove a countable loop dead
// than to prove a noncountable one. (In some C dialects, an infinite loop
// isn't dead even if it computes nothing useful. In general, DCE needs
// to prove a noncountable loop finite before safely deleting it.)
//
// - If the loop also performs something else, it remains alive.
// Since it is transformed to countable form, it can be aggressively
// optimized by some optimizations which are in general not applicable
// to a noncountable loop.
//
// After this step, this loop (conceptually) would look like following:
// newcnt = __builtin_ctpop(x);
// t = newcnt;
// if (x)
// do { cnt++; x &= x-1; t--; } while (t > 0);
BasicBlock *Body = *(CurLoop->block_begin());
{
auto *LbBr = cast<BranchInst>(Body->getTerminator());
ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
Type *Ty = TripCnt->getType();
PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
Builder.SetInsertPoint(LbCond);
Instruction *TcDec = cast<Instruction>(
Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
"tcdec", false, true));
TcPhi->addIncoming(TripCnt, PreHead);
TcPhi->addIncoming(TcDec, Body);
CmpInst::Predicate Pred =
(LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_UGT : CmpInst::ICMP_SLE;
LbCond->setPredicate(Pred);
LbCond->setOperand(0, TcDec);
LbCond->setOperand(1, ConstantInt::get(Ty, 0));
}
// Step 4: All the references to the original population counter outside
// the loop are replaced with the NewCount -- the value returned from
// __builtin_ctpop().
CntInst->replaceUsesOutsideBlock(NewCount, Body);
// Step 5: Forget the "non-computable" trip-count SCEV associated with the
// loop. The loop would otherwise not be deleted even if it becomes empty.
SE->forgetLoop(CurLoop);
}
Index: projects/clang700-import/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/lib/Transforms/Utils/BuildLibCalls.cpp (revision 340125)
@@ -1,1200 +1,1221 @@
//===- BuildLibCalls.cpp - Utility builder for libcalls -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some functions that will create standard C libcalls.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
using namespace llvm;
#define DEBUG_TYPE "build-libcalls"
//- Infer Attributes ---------------------------------------------------------//
STATISTIC(NumReadNone, "Number of functions inferred as readnone");
STATISTIC(NumReadOnly, "Number of functions inferred as readonly");
STATISTIC(NumArgMemOnly, "Number of functions inferred as argmemonly");
STATISTIC(NumNoUnwind, "Number of functions inferred as nounwind");
STATISTIC(NumNoCapture, "Number of arguments inferred as nocapture");
STATISTIC(NumReadOnlyArg, "Number of arguments inferred as readonly");
STATISTIC(NumNoAlias, "Number of function returns inferred as noalias");
STATISTIC(NumNonNull, "Number of function returns inferred as nonnull returns");
static bool setDoesNotAccessMemory(Function &F) {
if (F.doesNotAccessMemory())
return false;
F.setDoesNotAccessMemory();
++NumReadNone;
return true;
}
static bool setOnlyReadsMemory(Function &F) {
if (F.onlyReadsMemory())
return false;
F.setOnlyReadsMemory();
++NumReadOnly;
return true;
}
static bool setOnlyAccessesArgMemory(Function &F) {
if (F.onlyAccessesArgMemory())
return false;
F.setOnlyAccessesArgMemory();
++NumArgMemOnly;
return true;
}
static bool setDoesNotThrow(Function &F) {
if (F.doesNotThrow())
return false;
F.setDoesNotThrow();
++NumNoUnwind;
return true;
}
static bool setRetDoesNotAlias(Function &F) {
if (F.hasAttribute(AttributeList::ReturnIndex, Attribute::NoAlias))
return false;
F.addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
++NumNoAlias;
return true;
}
static bool setDoesNotCapture(Function &F, unsigned ArgNo) {
if (F.hasParamAttribute(ArgNo, Attribute::NoCapture))
return false;
F.addParamAttr(ArgNo, Attribute::NoCapture);
++NumNoCapture;
return true;
}
static bool setOnlyReadsMemory(Function &F, unsigned ArgNo) {
if (F.hasParamAttribute(ArgNo, Attribute::ReadOnly))
return false;
F.addParamAttr(ArgNo, Attribute::ReadOnly);
++NumReadOnlyArg;
return true;
}
static bool setRetNonNull(Function &F) {
assert(F.getReturnType()->isPointerTy() &&
"nonnull applies only to pointers");
if (F.hasAttribute(AttributeList::ReturnIndex, Attribute::NonNull))
return false;
F.addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
++NumNonNull;
return true;
}
static bool setNonLazyBind(Function &F) {
if (F.hasFnAttribute(Attribute::NonLazyBind))
return false;
F.addFnAttr(Attribute::NonLazyBind);
return true;
}
+bool llvm::inferLibFuncAttributes(Module *M, StringRef Name,
+ const TargetLibraryInfo &TLI) {
+ Function *F = M->getFunction(Name);
+ if (!F)
+ return false;
+ return inferLibFuncAttributes(*F, TLI);
+}
+
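+// Editorial note: the new overload lets callers that only have a name write
+//   inferLibFuncAttributes(M, "memset_pattern16", *TLI);
+// and simply get 'false' back when the declaration is absent, instead of
+// dereferencing a possibly-null result of M->getFunction() themselves.
+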
bool llvm::inferLibFuncAttributes(Function &F, const TargetLibraryInfo &TLI) {
LibFunc TheLibFunc;
if (!(TLI.getLibFunc(F, TheLibFunc) && TLI.has(TheLibFunc)))
return false;
bool Changed = false;
if (F.getParent() != nullptr && F.getParent()->getRtLibUseGOT())
Changed |= setNonLazyBind(F);
switch (TheLibFunc) {
case LibFunc_strlen:
case LibFunc_wcslen:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
Changed |= setOnlyAccessesArgMemory(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_strchr:
case LibFunc_strrchr:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
return Changed;
case LibFunc_strtol:
case LibFunc_strtod:
case LibFunc_strtof:
case LibFunc_strtoul:
case LibFunc_strtoll:
case LibFunc_strtold:
case LibFunc_strtoull:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_strcpy:
case LibFunc_stpcpy:
case LibFunc_strcat:
case LibFunc_strncat:
case LibFunc_strncpy:
case LibFunc_stpncpy:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_strxfrm:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_strcmp: // 0,1
case LibFunc_strspn: // 0,1
case LibFunc_strncmp: // 0,1
case LibFunc_strcspn: // 0,1
case LibFunc_strcoll: // 0,1
case LibFunc_strcasecmp: // 0,1
case LibFunc_strncasecmp: // 0,1
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_strstr:
case LibFunc_strpbrk:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_strtok:
case LibFunc_strtok_r:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_scanf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_setbuf:
case LibFunc_setvbuf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_strdup:
case LibFunc_strndup:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_stat:
case LibFunc_statvfs:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_sscanf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_sprintf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_snprintf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 2);
Changed |= setOnlyReadsMemory(F, 2);
return Changed;
case LibFunc_setitimer:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
Changed |= setDoesNotCapture(F, 2);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_system:
// May throw; "system" is a valid pthread cancellation point.
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_malloc:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
return Changed;
case LibFunc_memcmp:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_memchr:
case LibFunc_memrchr:
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotThrow(F);
return Changed;
case LibFunc_modf:
case LibFunc_modff:
case LibFunc_modfl:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_memcpy:
case LibFunc_mempcpy:
case LibFunc_memccpy:
case LibFunc_memmove:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_memcpy_chk:
Changed |= setDoesNotThrow(F);
return Changed;
case LibFunc_memalign:
Changed |= setRetDoesNotAlias(F);
return Changed;
case LibFunc_mkdir:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_mktime:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_realloc:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_read:
// May throw; "read" is a valid pthread cancellation point.
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_rewind:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_rmdir:
case LibFunc_remove:
case LibFunc_realpath:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_rename:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_readlink:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_write:
// May throw; "write" is a valid pthread cancellation point.
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_bcopy:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_bcmp:
Changed |= setDoesNotThrow(F);
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_bzero:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_calloc:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
return Changed;
case LibFunc_chmod:
case LibFunc_chown:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_ctermid:
case LibFunc_clearerr:
case LibFunc_closedir:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_atoi:
case LibFunc_atol:
case LibFunc_atof:
case LibFunc_atoll:
Changed |= setDoesNotThrow(F);
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_access:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_fopen:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_fdopen:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_feof:
case LibFunc_free:
case LibFunc_fseek:
case LibFunc_ftell:
case LibFunc_fgetc:
case LibFunc_fgetc_unlocked:
case LibFunc_fseeko:
case LibFunc_ftello:
case LibFunc_fileno:
case LibFunc_fflush:
case LibFunc_fclose:
case LibFunc_fsetpos:
case LibFunc_flockfile:
case LibFunc_funlockfile:
case LibFunc_ftrylockfile:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_ferror:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F);
return Changed;
case LibFunc_fputc:
case LibFunc_fputc_unlocked:
case LibFunc_fstat:
case LibFunc_frexp:
case LibFunc_frexpf:
case LibFunc_frexpl:
case LibFunc_fstatvfs:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_fgets:
case LibFunc_fgets_unlocked:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 2);
return Changed;
case LibFunc_fread:
case LibFunc_fread_unlocked:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 3);
return Changed;
case LibFunc_fwrite:
case LibFunc_fwrite_unlocked:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 3);
// FIXME: readonly #1?
return Changed;
case LibFunc_fputs:
case LibFunc_fputs_unlocked:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_fscanf:
case LibFunc_fprintf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_fgetpos:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_getc:
case LibFunc_getlogin_r:
case LibFunc_getc_unlocked:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_getenv:
Changed |= setDoesNotThrow(F);
Changed |= setOnlyReadsMemory(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_gets:
case LibFunc_getchar:
case LibFunc_getchar_unlocked:
Changed |= setDoesNotThrow(F);
return Changed;
case LibFunc_getitimer:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_getpwnam:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_ungetc:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_uname:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_unlink:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_unsetenv:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_utime:
case LibFunc_utimes:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_putc:
case LibFunc_putc_unlocked:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_puts:
case LibFunc_printf:
case LibFunc_perror:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_pread:
// May throw; "pread" is a valid pthread cancellation point.
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_pwrite:
// May throw; "pwrite" is a valid pthread cancellation point.
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_putchar:
case LibFunc_putchar_unlocked:
Changed |= setDoesNotThrow(F);
return Changed;
case LibFunc_popen:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_pclose:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_vscanf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_vsscanf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_vfscanf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_valloc:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
return Changed;
case LibFunc_vprintf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_vfprintf:
case LibFunc_vsprintf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_vsnprintf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 2);
Changed |= setOnlyReadsMemory(F, 2);
return Changed;
case LibFunc_open:
// May throw; "open" is a valid pthread cancellation point.
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_opendir:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_tmpfile:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
return Changed;
case LibFunc_times:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_htonl:
case LibFunc_htons:
case LibFunc_ntohl:
case LibFunc_ntohs:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotAccessMemory(F);
return Changed;
case LibFunc_lstat:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_lchown:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_qsort:
// May throw; places call through function pointer.
Changed |= setDoesNotCapture(F, 3);
return Changed;
case LibFunc_dunder_strdup:
case LibFunc_dunder_strndup:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_dunder_strtok_r:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_under_IO_getc:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_under_IO_putc:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_dunder_isoc99_scanf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_stat64:
case LibFunc_lstat64:
case LibFunc_statvfs64:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_dunder_isoc99_sscanf:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_fopen64:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 0);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
case LibFunc_fseeko64:
case LibFunc_ftello64:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
return Changed;
case LibFunc_tmpfile64:
Changed |= setDoesNotThrow(F);
Changed |= setRetDoesNotAlias(F);
return Changed;
case LibFunc_fstat64:
case LibFunc_fstatvfs64:
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_open64:
// May throw; "open" is a valid pthread cancellation point.
Changed |= setDoesNotCapture(F, 0);
Changed |= setOnlyReadsMemory(F, 0);
return Changed;
case LibFunc_gettimeofday:
// Currently some platforms have the restrict keyword on the arguments to
// gettimeofday. To be conservative, do not add noalias to gettimeofday's
// arguments.
Changed |= setDoesNotThrow(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
return Changed;
case LibFunc_Znwj: // new(unsigned int)
case LibFunc_Znwm: // new(unsigned long)
case LibFunc_Znaj: // new[](unsigned int)
case LibFunc_Znam: // new[](unsigned long)
case LibFunc_msvc_new_int: // new(unsigned int)
case LibFunc_msvc_new_longlong: // new(unsigned long long)
case LibFunc_msvc_new_array_int: // new[](unsigned int)
case LibFunc_msvc_new_array_longlong: // new[](unsigned long long)
// Operator new always returns a nonnull noalias pointer.
Changed |= setRetNonNull(F);
Changed |= setRetDoesNotAlias(F);
return Changed;
// TODO: add LibFunc entries for:
// case LibFunc_memset_pattern4:
// case LibFunc_memset_pattern8:
case LibFunc_memset_pattern16:
Changed |= setOnlyAccessesArgMemory(F);
Changed |= setDoesNotCapture(F, 0);
Changed |= setDoesNotCapture(F, 1);
Changed |= setOnlyReadsMemory(F, 1);
return Changed;
// int __nvvm_reflect(const char *)
case LibFunc_nvvm_reflect:
Changed |= setDoesNotAccessMemory(F);
Changed |= setDoesNotThrow(F);
return Changed;
default:
// FIXME: It'd be really nice to cover all the library functions we're
// aware of here.
return false;
}
}
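// For example, given a module declaring i8* @__strdup(i8*), the __strdup case
// above marks the function nounwind, adds noalias to its return value, and
// marks argument 0 nocapture and readonly.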
bool llvm::hasUnaryFloatFn(const TargetLibraryInfo *TLI, Type *Ty,
LibFunc DoubleFn, LibFunc FloatFn,
LibFunc LongDoubleFn) {
switch (Ty->getTypeID()) {
case Type::FloatTyID:
return TLI->has(FloatFn);
case Type::DoubleTyID:
return TLI->has(DoubleFn);
default:
return TLI->has(LongDoubleFn);
}
}
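// For example, a caller interested in the sine family would pass
// (TLI, Ty, LibFunc_sin, LibFunc_sinf, LibFunc_sinl); note that the default
// case treats every type other than float and double as the long double
// variant.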
//- Emit LibCalls ------------------------------------------------------------//
Value *llvm::castToCStr(Value *V, IRBuilder<> &B) {
unsigned AS = V->getType()->getPointerAddressSpace();
return B.CreateBitCast(V, B.getInt8PtrTy(AS), "cstr");
}
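// castToCStr is a plain bitcast to i8* in V's own address space, so e.g. a
// [4 x i8]* string constant becomes an i8* value named "cstr".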
Value *llvm::emitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_strlen))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef StrlenName = TLI->getName(LibFunc_strlen);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Constant *StrLen = M->getOrInsertFunction("strlen", DL.getIntPtrType(Context),
+ Constant *StrLen = M->getOrInsertFunction(StrlenName, DL.getIntPtrType(Context),
B.getInt8PtrTy());
- inferLibFuncAttributes(*M->getFunction("strlen"), *TLI);
- CallInst *CI = B.CreateCall(StrLen, castToCStr(Ptr, B), "strlen");
+ inferLibFuncAttributes(M, StrlenName, *TLI);
+ CallInst *CI = B.CreateCall(StrLen, castToCStr(Ptr, B), StrlenName);
if (const Function *F = dyn_cast<Function>(StrLen->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
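// Most emitters below follow the same shape: ask TLI for the platform's
// actual name of the routine, declare it with getOrInsertFunction, infer
// attributes by (module, name) rather than through a bare Function reference,
// emit the call, and copy the callee's calling convention onto the call site.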
Value *llvm::emitStrChr(Value *Ptr, char C, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_strchr))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef StrChrName = TLI->getName(LibFunc_strchr);
Type *I8Ptr = B.getInt8PtrTy();
Type *I32Ty = B.getInt32Ty();
Constant *StrChr =
- M->getOrInsertFunction("strchr", I8Ptr, I8Ptr, I32Ty);
- inferLibFuncAttributes(*M->getFunction("strchr"), *TLI);
+ M->getOrInsertFunction(StrChrName, I8Ptr, I8Ptr, I32Ty);
+ inferLibFuncAttributes(M, StrChrName, *TLI);
CallInst *CI = B.CreateCall(
- StrChr, {castToCStr(Ptr, B), ConstantInt::get(I32Ty, C)}, "strchr");
+ StrChr, {castToCStr(Ptr, B), ConstantInt::get(I32Ty, C)}, StrChrName);
if (const Function *F = dyn_cast<Function>(StrChr->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_strncmp))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef StrNCmpName = TLI->getName(LibFunc_strncmp);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *StrNCmp = M->getOrInsertFunction("strncmp", B.getInt32Ty(),
+ Value *StrNCmp = M->getOrInsertFunction(StrNCmpName, B.getInt32Ty(),
B.getInt8PtrTy(), B.getInt8PtrTy(),
DL.getIntPtrType(Context));
- inferLibFuncAttributes(*M->getFunction("strncmp"), *TLI);
+ inferLibFuncAttributes(M, StrNCmpName, *TLI);
CallInst *CI = B.CreateCall(
- StrNCmp, {castToCStr(Ptr1, B), castToCStr(Ptr2, B), Len}, "strncmp");
+ StrNCmp, {castToCStr(Ptr1, B), castToCStr(Ptr2, B), Len}, StrNCmpName);
if (const Function *F = dyn_cast<Function>(StrNCmp->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
const TargetLibraryInfo *TLI, StringRef Name) {
if (!TLI->has(LibFunc_strcpy))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
Type *I8Ptr = B.getInt8PtrTy();
Value *StrCpy = M->getOrInsertFunction(Name, I8Ptr, I8Ptr, I8Ptr);
- inferLibFuncAttributes(*M->getFunction(Name), *TLI);
+ inferLibFuncAttributes(M, Name, *TLI);
CallInst *CI =
B.CreateCall(StrCpy, {castToCStr(Dst, B), castToCStr(Src, B)}, Name);
if (const Function *F = dyn_cast<Function>(StrCpy->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
const TargetLibraryInfo *TLI, StringRef Name) {
if (!TLI->has(LibFunc_strncpy))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
Type *I8Ptr = B.getInt8PtrTy();
Value *StrNCpy = M->getOrInsertFunction(Name, I8Ptr, I8Ptr, I8Ptr,
Len->getType());
- inferLibFuncAttributes(*M->getFunction(Name), *TLI);
+ inferLibFuncAttributes(M, Name, *TLI);
CallInst *CI = B.CreateCall(
- StrNCpy, {castToCStr(Dst, B), castToCStr(Src, B), Len}, "strncpy");
+ StrNCpy, {castToCStr(Dst, B), castToCStr(Src, B), Len}, Name);
if (const Function *F = dyn_cast<Function>(StrNCpy->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_memcpy_chk))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
AttributeList AS;
AS = AttributeList::get(M->getContext(), AttributeList::FunctionIndex,
Attribute::NoUnwind);
LLVMContext &Context = B.GetInsertBlock()->getContext();
Value *MemCpy = M->getOrInsertFunction(
"__memcpy_chk", AttributeList::get(M->getContext(), AS), B.getInt8PtrTy(),
B.getInt8PtrTy(), B.getInt8PtrTy(), DL.getIntPtrType(Context),
DL.getIntPtrType(Context));
Dst = castToCStr(Dst, B);
Src = castToCStr(Src, B);
CallInst *CI = B.CreateCall(MemCpy, {Dst, Src, Len, ObjSize});
if (const Function *F = dyn_cast<Function>(MemCpy->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
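// Note that __memcpy_chk only receives an explicit nounwind attribute here;
// unlike the other emitters it does not go through inferLibFuncAttributes.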
Value *llvm::emitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_memchr))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef MemChrName = TLI->getName(LibFunc_memchr);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *MemChr = M->getOrInsertFunction("memchr", B.getInt8PtrTy(),
+ Value *MemChr = M->getOrInsertFunction(MemChrName, B.getInt8PtrTy(),
B.getInt8PtrTy(), B.getInt32Ty(),
DL.getIntPtrType(Context));
- inferLibFuncAttributes(*M->getFunction("memchr"), *TLI);
- CallInst *CI = B.CreateCall(MemChr, {castToCStr(Ptr, B), Val, Len}, "memchr");
+ inferLibFuncAttributes(M, MemChrName, *TLI);
+ CallInst *CI = B.CreateCall(MemChr, {castToCStr(Ptr, B), Val, Len}, MemChrName);
if (const Function *F = dyn_cast<Function>(MemChr->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_memcmp))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef MemCmpName = TLI->getName(LibFunc_memcmp);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *MemCmp = M->getOrInsertFunction("memcmp", B.getInt32Ty(),
+ Value *MemCmp = M->getOrInsertFunction(MemCmpName, B.getInt32Ty(),
B.getInt8PtrTy(), B.getInt8PtrTy(),
DL.getIntPtrType(Context));
- inferLibFuncAttributes(*M->getFunction("memcmp"), *TLI);
+ inferLibFuncAttributes(M, MemCmpName, *TLI);
CallInst *CI = B.CreateCall(
- MemCmp, {castToCStr(Ptr1, B), castToCStr(Ptr2, B), Len}, "memcmp");
+ MemCmp, {castToCStr(Ptr1, B), castToCStr(Ptr2, B), Len}, MemCmpName);
if (const Function *F = dyn_cast<Function>(MemCmp->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
/// Append a suffix to the function name according to the type of 'Op'.
static void appendTypeSuffix(Value *Op, StringRef &Name,
SmallString<20> &NameBuffer) {
if (!Op->getType()->isDoubleTy()) {
NameBuffer += Name;
if (Op->getType()->isFloatTy())
NameBuffer += 'f';
else
NameBuffer += 'l';
Name = NameBuffer;
}
}
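// For example, with Name == "pow": a double operand leaves the name as-is, a
// float operand yields "powf", and any other type is assumed to be long
// double and yields "powl".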
Value *llvm::emitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
const AttributeList &Attrs) {
SmallString<20> NameBuffer;
appendTypeSuffix(Op, Name, NameBuffer);
Module *M = B.GetInsertBlock()->getModule();
Value *Callee = M->getOrInsertFunction(Name, Op->getType(),
Op->getType());
CallInst *CI = B.CreateCall(Callee, Op, Name);
// The incoming attribute set may have come from a speculatable intrinsic, but
// is being replaced with a library call which is not allowed to be
// speculatable.
CI->setAttributes(Attrs.removeAttribute(B.getContext(),
AttributeList::FunctionIndex,
Attribute::Speculatable));
if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
IRBuilder<> &B, const AttributeList &Attrs) {
SmallString<20> NameBuffer;
appendTypeSuffix(Op1, Name, NameBuffer);
Module *M = B.GetInsertBlock()->getModule();
Value *Callee = M->getOrInsertFunction(Name, Op1->getType(), Op1->getType(),
Op2->getType());
CallInst *CI = B.CreateCall(Callee, {Op1, Op2}, Name);
CI->setAttributes(Attrs);
if (const Function *F = dyn_cast<Function>(Callee->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitPutChar(Value *Char, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_putchar))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
- Value *PutChar = M->getOrInsertFunction("putchar", B.getInt32Ty(), B.getInt32Ty());
- inferLibFuncAttributes(*M->getFunction("putchar"), *TLI);
+ StringRef PutCharName = TLI->getName(LibFunc_putchar);
+ Value *PutChar = M->getOrInsertFunction(PutCharName, B.getInt32Ty(), B.getInt32Ty());
+ inferLibFuncAttributes(M, PutCharName, *TLI);
CallInst *CI = B.CreateCall(PutChar,
B.CreateIntCast(Char,
B.getInt32Ty(),
/*isSigned*/true,
"chari"),
- "putchar");
+ PutCharName);
if (const Function *F = dyn_cast<Function>(PutChar->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitPutS(Value *Str, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_puts))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef PutsName = TLI->getName(LibFunc_puts);
Value *PutS =
- M->getOrInsertFunction("puts", B.getInt32Ty(), B.getInt8PtrTy());
- inferLibFuncAttributes(*M->getFunction("puts"), *TLI);
- CallInst *CI = B.CreateCall(PutS, castToCStr(Str, B), "puts");
+ M->getOrInsertFunction(PutsName, B.getInt32Ty(), B.getInt8PtrTy());
+ inferLibFuncAttributes(M, PutsName, *TLI);
+ CallInst *CI = B.CreateCall(PutS, castToCStr(Str, B), PutsName);
if (const Function *F = dyn_cast<Function>(PutS->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitFPutC(Value *Char, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fputc))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
- Constant *F = M->getOrInsertFunction("fputc", B.getInt32Ty(), B.getInt32Ty(),
+ StringRef FPutcName = TLI->getName(LibFunc_fputc);
+ Constant *F = M->getOrInsertFunction(FPutcName, B.getInt32Ty(), B.getInt32Ty(),
File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction("fputc"), *TLI);
+ inferLibFuncAttributes(M, FPutcName, *TLI);
Char = B.CreateIntCast(Char, B.getInt32Ty(), /*isSigned*/true,
"chari");
- CallInst *CI = B.CreateCall(F, {Char, File}, "fputc");
+ CallInst *CI = B.CreateCall(F, {Char, File}, FPutcName);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitFPutCUnlocked(Value *Char, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fputc_unlocked))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
- Constant *F = M->getOrInsertFunction("fputc_unlocked", B.getInt32Ty(),
+ StringRef FPutcUnlockedName = TLI->getName(LibFunc_fputc_unlocked);
+ Constant *F = M->getOrInsertFunction(FPutcUnlockedName, B.getInt32Ty(),
B.getInt32Ty(), File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction("fputc_unlocked"), *TLI);
+ inferLibFuncAttributes(M, FPutcUnlockedName, *TLI);
Char = B.CreateIntCast(Char, B.getInt32Ty(), /*isSigned*/ true, "chari");
- CallInst *CI = B.CreateCall(F, {Char, File}, "fputc_unlocked");
+ CallInst *CI = B.CreateCall(F, {Char, File}, FPutcUnlockedName);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitFPutS(Value *Str, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fputs))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
StringRef FPutsName = TLI->getName(LibFunc_fputs);
Constant *F = M->getOrInsertFunction(
FPutsName, B.getInt32Ty(), B.getInt8PtrTy(), File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction(FPutsName), *TLI);
- CallInst *CI = B.CreateCall(F, {castToCStr(Str, B), File}, "fputs");
+ inferLibFuncAttributes(M, FPutsName, *TLI);
+ CallInst *CI = B.CreateCall(F, {castToCStr(Str, B), File}, FPutsName);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitFPutSUnlocked(Value *Str, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fputs_unlocked))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
StringRef FPutsUnlockedName = TLI->getName(LibFunc_fputs_unlocked);
Constant *F = M->getOrInsertFunction(FPutsUnlockedName, B.getInt32Ty(),
B.getInt8PtrTy(), File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction(FPutsUnlockedName), *TLI);
- CallInst *CI = B.CreateCall(F, {castToCStr(Str, B), File}, "fputs_unlocked");
+ inferLibFuncAttributes(M, FPutsUnlockedName, *TLI);
+ CallInst *CI = B.CreateCall(F, {castToCStr(Str, B), File}, FPutsUnlockedName);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const DataLayout &DL, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fwrite))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
LLVMContext &Context = B.GetInsertBlock()->getContext();
StringRef FWriteName = TLI->getName(LibFunc_fwrite);
Constant *F = M->getOrInsertFunction(
FWriteName, DL.getIntPtrType(Context), B.getInt8PtrTy(),
DL.getIntPtrType(Context), DL.getIntPtrType(Context), File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction(FWriteName), *TLI);
+ inferLibFuncAttributes(M, FWriteName, *TLI);
CallInst *CI =
B.CreateCall(F, {castToCStr(Ptr, B), Size,
ConstantInt::get(DL.getIntPtrType(Context), 1), File});
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitMalloc(Value *Num, IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_malloc))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef MallocName = TLI->getName(LibFunc_malloc);
LLVMContext &Context = B.GetInsertBlock()->getContext();
- Value *Malloc = M->getOrInsertFunction("malloc", B.getInt8PtrTy(),
+ Value *Malloc = M->getOrInsertFunction(MallocName, B.getInt8PtrTy(),
DL.getIntPtrType(Context));
- inferLibFuncAttributes(*M->getFunction("malloc"), *TLI);
- CallInst *CI = B.CreateCall(Malloc, Num, "malloc");
+ inferLibFuncAttributes(M, MallocName, *TLI);
+ CallInst *CI = B.CreateCall(Malloc, Num, MallocName);
if (const Function *F = dyn_cast<Function>(Malloc->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitCalloc(Value *Num, Value *Size, const AttributeList &Attrs,
IRBuilder<> &B, const TargetLibraryInfo &TLI) {
if (!TLI.has(LibFunc_calloc))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef CallocName = TLI.getName(LibFunc_calloc);
const DataLayout &DL = M->getDataLayout();
IntegerType *PtrType = DL.getIntPtrType((B.GetInsertBlock()->getContext()));
- Value *Calloc = M->getOrInsertFunction("calloc", Attrs, B.getInt8PtrTy(),
+ Value *Calloc = M->getOrInsertFunction(CallocName, Attrs, B.getInt8PtrTy(),
PtrType, PtrType);
- inferLibFuncAttributes(*M->getFunction("calloc"), TLI);
- CallInst *CI = B.CreateCall(Calloc, {Num, Size}, "calloc");
+ inferLibFuncAttributes(M, CallocName, TLI);
+ CallInst *CI = B.CreateCall(Calloc, {Num, Size}, CallocName);
if (const auto *F = dyn_cast<Function>(Calloc->stripPointerCasts()))
CI->setCallingConv(F->getCallingConv());
return CI;
}
Value *llvm::emitFWriteUnlocked(Value *Ptr, Value *Size, Value *N, Value *File,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fwrite_unlocked))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
LLVMContext &Context = B.GetInsertBlock()->getContext();
StringRef FWriteUnlockedName = TLI->getName(LibFunc_fwrite_unlocked);
Constant *F = M->getOrInsertFunction(
FWriteUnlockedName, DL.getIntPtrType(Context), B.getInt8PtrTy(),
DL.getIntPtrType(Context), DL.getIntPtrType(Context), File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction(FWriteUnlockedName), *TLI);
+ inferLibFuncAttributes(M, FWriteUnlockedName, *TLI);
CallInst *CI = B.CreateCall(F, {castToCStr(Ptr, B), Size, N, File});
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitFGetCUnlocked(Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fgetc_unlocked))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef FGetCUnlockedName = TLI->getName(LibFunc_fgetc_unlocked);
Constant *F =
- M->getOrInsertFunction("fgetc_unlocked", B.getInt32Ty(), File->getType());
+ M->getOrInsertFunction(FGetCUnlockedName, B.getInt32Ty(), File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction("fgetc_unlocked"), *TLI);
- CallInst *CI = B.CreateCall(F, File, "fgetc_unlocked");
+ inferLibFuncAttributes(M, FGetCUnlockedName, *TLI);
+ CallInst *CI = B.CreateCall(F, File, FGetCUnlockedName);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitFGetSUnlocked(Value *Str, Value *Size, Value *File,
IRBuilder<> &B, const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fgets_unlocked))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
+ StringRef FGetSUnlockedName = TLI->getName(LibFunc_fgets_unlocked);
Constant *F =
- M->getOrInsertFunction("fgets_unlocked", B.getInt8PtrTy(),
+ M->getOrInsertFunction(FGetSUnlockedName, B.getInt8PtrTy(),
B.getInt8PtrTy(), B.getInt32Ty(), File->getType());
- inferLibFuncAttributes(*M->getFunction("fgets_unlocked"), *TLI);
+ inferLibFuncAttributes(M, FGetSUnlockedName, *TLI);
CallInst *CI =
- B.CreateCall(F, {castToCStr(Str, B), Size, File}, "fgets_unlocked");
+ B.CreateCall(F, {castToCStr(Str, B), Size, File}, FGetSUnlockedName);
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Value *llvm::emitFReadUnlocked(Value *Ptr, Value *Size, Value *N, Value *File,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI) {
if (!TLI->has(LibFunc_fread_unlocked))
return nullptr;
Module *M = B.GetInsertBlock()->getModule();
LLVMContext &Context = B.GetInsertBlock()->getContext();
StringRef FReadUnlockedName = TLI->getName(LibFunc_fread_unlocked);
Constant *F = M->getOrInsertFunction(
FReadUnlockedName, DL.getIntPtrType(Context), B.getInt8PtrTy(),
DL.getIntPtrType(Context), DL.getIntPtrType(Context), File->getType());
if (File->getType()->isPointerTy())
- inferLibFuncAttributes(*M->getFunction(FReadUnlockedName), *TLI);
+ inferLibFuncAttributes(M, FReadUnlockedName, *TLI);
CallInst *CI = B.CreateCall(F, {castToCStr(Ptr, B), Size, N, File});
if (const Function *Fn = dyn_cast<Function>(F->stripPointerCasts()))
CI->setCallingConv(Fn->getCallingConv());
return CI;
}
Index: projects/clang700-import/contrib/llvm/tools/clang/include/clang/AST/Decl.h
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/include/clang/AST/Decl.h (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/include/clang/AST/Decl.h (revision 340125)
@@ -1,4335 +1,4342 @@
//===- Decl.h - Classes for representing declarations -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Decl subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECL_H
#define LLVM_CLANG_AST_DECL_H
#include "clang/AST/APValue.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Type.h"
#include "clang/Basic/AddressSpaces.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TrailingObjects.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
namespace clang {
class ASTContext;
struct ASTTemplateArgumentListInfo;
class Attr;
class CompoundStmt;
class DependentFunctionTemplateSpecializationInfo;
class EnumDecl;
class Expr;
class FunctionTemplateDecl;
class FunctionTemplateSpecializationInfo;
class LabelStmt;
class MemberSpecializationInfo;
class Module;
class NamespaceDecl;
class ParmVarDecl;
class RecordDecl;
class Stmt;
class StringLiteral;
class TagDecl;
class TemplateArgumentList;
class TemplateArgumentListInfo;
class TemplateParameterList;
class TypeAliasTemplateDecl;
class TypeLoc;
class UnresolvedSetImpl;
class VarTemplateDecl;
/// A container of type source information.
///
/// A client can read the relevant info using TypeLoc wrappers, e.g:
/// @code
/// TypeLoc TL = TypeSourceInfo->getTypeLoc();
/// TL.getStartLoc().print(OS, SrcMgr);
/// @endcode
class alignas(8) TypeSourceInfo {
// Contains a memory block after the class, used for type source information,
// allocated by ASTContext.
friend class ASTContext;
QualType Ty;
TypeSourceInfo(QualType ty) : Ty(ty) {}
public:
/// Return the type wrapped by this type source info.
QualType getType() const { return Ty; }
/// Return the TypeLoc wrapper for the type source info.
TypeLoc getTypeLoc() const; // implemented in TypeLoc.h
/// Override the type stored in this TypeSourceInfo. Use with caution!
void overrideType(QualType T) { Ty = T; }
};
/// The top declaration context.
class TranslationUnitDecl : public Decl, public DeclContext {
ASTContext &Ctx;
/// The (most recently entered) anonymous namespace for this
/// translation unit, if one has been created.
NamespaceDecl *AnonymousNamespace = nullptr;
explicit TranslationUnitDecl(ASTContext &ctx);
virtual void anchor();
public:
ASTContext &getASTContext() const { return Ctx; }
NamespaceDecl *getAnonymousNamespace() const { return AnonymousNamespace; }
void setAnonymousNamespace(NamespaceDecl *D) { AnonymousNamespace = D; }
static TranslationUnitDecl *Create(ASTContext &C);
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == TranslationUnit; }
static DeclContext *castToDeclContext(const TranslationUnitDecl *D) {
return static_cast<DeclContext *>(const_cast<TranslationUnitDecl*>(D));
}
static TranslationUnitDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<TranslationUnitDecl *>(const_cast<DeclContext*>(DC));
}
};
/// Represents a `#pragma comment` line. Always a child of
/// TranslationUnitDecl.
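/// For example:
/// \code
///   #pragma comment(lib, "msvcrt.lib")
/// \endcode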
class PragmaCommentDecl final
: public Decl,
private llvm::TrailingObjects<PragmaCommentDecl, char> {
friend class ASTDeclReader;
friend class ASTDeclWriter;
friend TrailingObjects;
PragmaMSCommentKind CommentKind;
PragmaCommentDecl(TranslationUnitDecl *TU, SourceLocation CommentLoc,
PragmaMSCommentKind CommentKind)
: Decl(PragmaComment, TU, CommentLoc), CommentKind(CommentKind) {}
virtual void anchor();
public:
static PragmaCommentDecl *Create(const ASTContext &C, TranslationUnitDecl *DC,
SourceLocation CommentLoc,
PragmaMSCommentKind CommentKind,
StringRef Arg);
static PragmaCommentDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned ArgSize);
PragmaMSCommentKind getCommentKind() const { return CommentKind; }
StringRef getArg() const { return getTrailingObjects<char>(); }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == PragmaComment; }
};
/// Represents a `#pragma detect_mismatch` line. Always a child of
/// TranslationUnitDecl.
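/// For example:
/// \code
///   #pragma detect_mismatch("build_mode", "release")
/// \endcode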
class PragmaDetectMismatchDecl final
: public Decl,
private llvm::TrailingObjects<PragmaDetectMismatchDecl, char> {
friend class ASTDeclReader;
friend class ASTDeclWriter;
friend TrailingObjects;
size_t ValueStart;
PragmaDetectMismatchDecl(TranslationUnitDecl *TU, SourceLocation Loc,
size_t ValueStart)
: Decl(PragmaDetectMismatch, TU, Loc), ValueStart(ValueStart) {}
virtual void anchor();
public:
static PragmaDetectMismatchDecl *Create(const ASTContext &C,
TranslationUnitDecl *DC,
SourceLocation Loc, StringRef Name,
StringRef Value);
static PragmaDetectMismatchDecl *
CreateDeserialized(ASTContext &C, unsigned ID, unsigned NameValueSize);
StringRef getName() const { return getTrailingObjects<char>(); }
StringRef getValue() const { return getTrailingObjects<char>() + ValueStart; }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == PragmaDetectMismatch; }
};
/// Declaration context for names declared as extern "C" in C++. This
/// is neither the semantic nor lexical context for such declarations, but is
/// used to check for conflicts with other extern "C" declarations. Example:
///
/// \code
/// namespace N { extern "C" void f(); } // #1
/// void N::f() {} // #2
/// namespace M { extern "C" void f(); } // #3
/// \endcode
///
/// The semantic context of #1 is namespace N and its lexical context is the
/// LinkageSpecDecl; the semantic context of #2 is namespace N and its lexical
/// context is the TU. However, both declarations are also visible in the
/// extern "C" context.
///
/// The declaration at #3 finds it is a redeclaration of \c N::f through
/// lookup in the extern "C" context.
class ExternCContextDecl : public Decl, public DeclContext {
explicit ExternCContextDecl(TranslationUnitDecl *TU)
: Decl(ExternCContext, TU, SourceLocation()),
DeclContext(ExternCContext) {}
virtual void anchor();
public:
static ExternCContextDecl *Create(const ASTContext &C,
TranslationUnitDecl *TU);
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ExternCContext; }
static DeclContext *castToDeclContext(const ExternCContextDecl *D) {
return static_cast<DeclContext *>(const_cast<ExternCContextDecl*>(D));
}
static ExternCContextDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<ExternCContextDecl *>(const_cast<DeclContext*>(DC));
}
};
/// This represents a decl that may have a name. Many decls have names such
/// as ObjCMethodDecl, but not \@class, etc.
///
/// Note that not every NamedDecl is actually named (e.g., a struct might
/// be anonymous), and not every name is an identifier.
class NamedDecl : public Decl {
/// The name of this declaration, which is typically a normal
/// identifier but may also be a special kind of name (C++
/// constructor, Objective-C selector, etc.)
DeclarationName Name;
virtual void anchor();
private:
NamedDecl *getUnderlyingDeclImpl() LLVM_READONLY;
protected:
NamedDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName N)
: Decl(DK, DC, L), Name(N) {}
public:
/// Get the identifier that names this declaration, if there is one.
///
/// This will return NULL if this declaration has no name (e.g., for
/// an unnamed class) or if the name is a special name (C++ constructor,
/// Objective-C selector, etc.).
IdentifierInfo *getIdentifier() const { return Name.getAsIdentifierInfo(); }
/// Get the name of identifier for this declaration as a StringRef.
///
/// This requires that the declaration have a name and that it be a simple
/// identifier.
StringRef getName() const {
assert(Name.isIdentifier() && "Name is not a simple identifier");
return getIdentifier() ? getIdentifier()->getName() : "";
}
/// Get a human-readable name for the declaration, even if it is one of the
/// special kinds of names (C++ constructor, Objective-C selector, etc).
///
/// Creating this name requires expensive string manipulation, so it should
/// be called only when performance doesn't matter. For simple declarations,
/// getNameAsCString() should suffice.
//
// FIXME: This function should be renamed to indicate that it is not just an
// alternate form of getName(), and clients should move as appropriate.
//
// FIXME: Deprecated, move clients to getName().
std::string getNameAsString() const { return Name.getAsString(); }
virtual void printName(raw_ostream &os) const;
/// Get the actual, stored name of the declaration, which may be a special
/// name.
DeclarationName getDeclName() const { return Name; }
/// Set the name of this declaration.
void setDeclName(DeclarationName N) { Name = N; }
/// Returns a human-readable qualified name for this declaration, like
/// A::B::i, for i being a member of namespace A::B.
///
/// If the declaration is not a member of context which can be named (record,
/// namespace), it will return the same result as printName().
///
/// Creating this name is expensive, so it should be called only when
/// performance doesn't matter.
void printQualifiedName(raw_ostream &OS) const;
void printQualifiedName(raw_ostream &OS, const PrintingPolicy &Policy) const;
// FIXME: Remove string version.
std::string getQualifiedNameAsString() const;
/// Appends a human-readable name for this declaration into the given stream.
///
/// This is the method invoked by Sema when displaying a NamedDecl
/// in a diagnostic. It does not necessarily produce the same
/// result as printName(); for example, class template
/// specializations are printed with their template arguments.
virtual void getNameForDiagnostic(raw_ostream &OS,
const PrintingPolicy &Policy,
bool Qualified) const;
/// Determine whether this declaration, if known to be well-formed within
/// its context, will replace the declaration OldD if introduced into scope.
///
/// A declaration will replace another declaration if, for example, it is
/// a redeclaration of the same variable or function, but not if it is a
/// declaration of a different kind (function vs. class) or an overloaded
/// function.
///
/// \param IsKnownNewer \c true if this declaration is known to be newer
/// than \p OldD (for instance, if this declaration is newly-created).
bool declarationReplaces(NamedDecl *OldD, bool IsKnownNewer = true) const;
/// Determine whether this declaration has linkage.
bool hasLinkage() const;
using Decl::isModulePrivate;
using Decl::setModulePrivate;
/// Determine whether this declaration is a C++ class member.
bool isCXXClassMember() const {
const DeclContext *DC = getDeclContext();
// C++0x [class.mem]p1:
// The enumerators of an unscoped enumeration defined in
// the class are members of the class.
if (isa<EnumDecl>(DC))
DC = DC->getRedeclContext();
return DC->isRecord();
}
/// Determine whether the given declaration is an instance member of
/// a C++ class.
bool isCXXInstanceMember() const;
/// Determine what kind of linkage this entity has.
///
/// This is not the linkage as defined by the standard or the codegen notion
/// of linkage. It is just an implementation detail that is used to compute
/// those.
Linkage getLinkageInternal() const;
/// Get the linkage from a semantic point of view. Entities in
/// anonymous namespaces are external (in c++98).
Linkage getFormalLinkage() const {
return clang::getFormalLinkage(getLinkageInternal());
}
/// True if this decl has external linkage.
bool hasExternalFormalLinkage() const {
return isExternalFormalLinkage(getLinkageInternal());
}
bool isExternallyVisible() const {
return clang::isExternallyVisible(getLinkageInternal());
}
/// Determine whether this declaration can be redeclared in a
/// different translation unit.
bool isExternallyDeclarable() const {
return isExternallyVisible() && !getOwningModuleForLinkage();
}
/// Determines the visibility of this entity.
Visibility getVisibility() const {
return getLinkageAndVisibility().getVisibility();
}
/// Determines the linkage and visibility of this entity.
LinkageInfo getLinkageAndVisibility() const;
/// Kinds of explicit visibility.
enum ExplicitVisibilityKind {
/// Do an LV computation for, ultimately, a type.
/// Visibility may be restricted by type visibility settings and
/// the visibility of template arguments.
VisibilityForType,
/// Do an LV computation for, ultimately, a non-type declaration.
/// Visibility may be restricted by value visibility settings and
/// the visibility of template arguments.
VisibilityForValue
};
/// If visibility was explicitly specified for this
/// declaration, return that visibility.
Optional<Visibility>
getExplicitVisibility(ExplicitVisibilityKind kind) const;
/// True if the computed linkage is valid. Used for consistency
/// checking. Should always return true.
bool isLinkageValid() const;
/// True if something has required us to compute the linkage
/// of this declaration.
///
/// Language features which can retroactively change linkage (like a
/// typedef name for linkage purposes) may need to consider this,
/// but hopefully only in transitory ways during parsing.
bool hasLinkageBeenComputed() const {
return hasCachedLinkage();
}
/// Looks through UsingDecls and ObjCCompatibleAliasDecls for
/// the underlying named decl.
NamedDecl *getUnderlyingDecl() {
// Fast-path the common case.
if (this->getKind() != UsingShadow &&
this->getKind() != ConstructorUsingShadow &&
this->getKind() != ObjCCompatibleAlias &&
this->getKind() != NamespaceAlias)
return this;
return getUnderlyingDeclImpl();
}
const NamedDecl *getUnderlyingDecl() const {
return const_cast<NamedDecl*>(this)->getUnderlyingDecl();
}
NamedDecl *getMostRecentDecl() {
return cast<NamedDecl>(static_cast<Decl *>(this)->getMostRecentDecl());
}
const NamedDecl *getMostRecentDecl() const {
return const_cast<NamedDecl*>(this)->getMostRecentDecl();
}
ObjCStringFormatFamily getObjCFStringFormattingFamily() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstNamed && K <= lastNamed; }
};
inline raw_ostream &operator<<(raw_ostream &OS, const NamedDecl &ND) {
ND.printName(OS);
return OS;
}
/// Represents the declaration of a label. Labels also have a
/// corresponding LabelStmt, which indicates the position at which the label
/// was defined. For normal labels, the location of the decl is the same as
/// the location of the statement. For GNU local labels (__label__), the decl
/// location is where the __label__ is.
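/// For example:
/// \code
///   void f() {
///     __label__ retry; // GNU local label; the decl's location is here
///    retry:
///     ...
///   }
/// \endcode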
class LabelDecl : public NamedDecl {
LabelStmt *TheStmt;
StringRef MSAsmName;
bool MSAsmNameResolved = false;
/// For normal labels, this is the same as the main declaration
/// label, i.e., the location of the identifier; for GNU local labels,
/// this is the location of the __label__ keyword.
SourceLocation LocStart;
LabelDecl(DeclContext *DC, SourceLocation IdentL, IdentifierInfo *II,
LabelStmt *S, SourceLocation StartL)
: NamedDecl(Label, DC, IdentL, II), TheStmt(S), LocStart(StartL) {}
void anchor() override;
public:
static LabelDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II);
static LabelDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation IdentL, IdentifierInfo *II,
SourceLocation GnuLabelL);
static LabelDecl *CreateDeserialized(ASTContext &C, unsigned ID);
LabelStmt *getStmt() const { return TheStmt; }
void setStmt(LabelStmt *T) { TheStmt = T; }
bool isGnuLocal() const { return LocStart != getLocation(); }
void setLocStart(SourceLocation L) { LocStart = L; }
SourceRange getSourceRange() const override LLVM_READONLY {
return SourceRange(LocStart, getLocation());
}
bool isMSAsmLabel() const { return !MSAsmName.empty(); }
bool isResolvedMSAsmLabel() const { return isMSAsmLabel() && MSAsmNameResolved; }
void setMSAsmLabel(StringRef Name);
StringRef getMSAsmLabel() const { return MSAsmName; }
void setMSAsmLabelResolved() { MSAsmNameResolved = true; }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Label; }
};
/// Represent a C++ namespace.
class NamespaceDecl : public NamedDecl, public DeclContext,
public Redeclarable<NamespaceDecl>
{
/// The starting location of the source range, pointing
/// to either the namespace or the inline keyword.
SourceLocation LocStart;
/// The ending location of the source range.
SourceLocation RBraceLoc;
/// A pointer to either the anonymous namespace that lives just inside
/// this namespace or to the first namespace in the chain (the latter case
/// only when this is not the first in the chain), along with a
/// boolean value indicating whether this is an inline namespace.
llvm::PointerIntPair<NamespaceDecl *, 1, bool> AnonOrFirstNamespaceAndInline;
NamespaceDecl(ASTContext &C, DeclContext *DC, bool Inline,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, NamespaceDecl *PrevDecl);
using redeclarable_base = Redeclarable<NamespaceDecl>;
NamespaceDecl *getNextRedeclarationImpl() override;
NamespaceDecl *getPreviousDeclImpl() override;
NamespaceDecl *getMostRecentDeclImpl() override;
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
static NamespaceDecl *Create(ASTContext &C, DeclContext *DC,
bool Inline, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
NamespaceDecl *PrevDecl);
static NamespaceDecl *CreateDeserialized(ASTContext &C, unsigned ID);
using redecl_range = redeclarable_base::redecl_range;
using redecl_iterator = redeclarable_base::redecl_iterator;
using redeclarable_base::redecls_begin;
using redeclarable_base::redecls_end;
using redeclarable_base::redecls;
using redeclarable_base::getPreviousDecl;
using redeclarable_base::getMostRecentDecl;
using redeclarable_base::isFirstDecl;
/// Returns true if this is an anonymous namespace declaration.
///
/// For example:
/// \code
/// namespace {
/// ...
/// };
/// \endcode
/// q.v. C++ [namespace.unnamed]
bool isAnonymousNamespace() const {
return !getIdentifier();
}
/// Returns true if this is an inline namespace declaration.
bool isInline() const {
return AnonOrFirstNamespaceAndInline.getInt();
}
/// Set whether this is an inline namespace declaration.
void setInline(bool Inline) {
AnonOrFirstNamespaceAndInline.setInt(Inline);
}
/// Get the original (first) namespace declaration.
NamespaceDecl *getOriginalNamespace();
/// Get the original (first) namespace declaration.
const NamespaceDecl *getOriginalNamespace() const;
/// Return true if this declaration is an original (first) declaration
/// of the namespace. This is false for non-original (subsequent) namespace
/// declarations and anonymous namespaces.
bool isOriginalNamespace() const;
/// Retrieve the anonymous namespace nested inside this namespace,
/// if any.
NamespaceDecl *getAnonymousNamespace() const {
return getOriginalNamespace()->AnonOrFirstNamespaceAndInline.getPointer();
}
void setAnonymousNamespace(NamespaceDecl *D) {
getOriginalNamespace()->AnonOrFirstNamespaceAndInline.setPointer(D);
}
/// Retrieves the canonical declaration of this namespace.
NamespaceDecl *getCanonicalDecl() override {
return getOriginalNamespace();
}
const NamespaceDecl *getCanonicalDecl() const {
return getOriginalNamespace();
}
SourceRange getSourceRange() const override LLVM_READONLY {
return SourceRange(LocStart, RBraceLoc);
}
SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); }
SourceLocation getBeginLoc() const LLVM_READONLY { return LocStart; }
SourceLocation getRBraceLoc() const { return RBraceLoc; }
void setLocStart(SourceLocation L) { LocStart = L; }
void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Namespace; }
static DeclContext *castToDeclContext(const NamespaceDecl *D) {
return static_cast<DeclContext *>(const_cast<NamespaceDecl*>(D));
}
static NamespaceDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<NamespaceDecl *>(const_cast<DeclContext*>(DC));
}
};
/// Represent the declaration of a variable (in which case it is
/// an lvalue), a function (in which case it is a function designator), or
/// an enum constant.
class ValueDecl : public NamedDecl {
QualType DeclType;
void anchor() override;
protected:
ValueDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName N, QualType T)
: NamedDecl(DK, DC, L, N), DeclType(T) {}
public:
QualType getType() const { return DeclType; }
void setType(QualType newType) { DeclType = newType; }
/// Determine whether this symbol is weakly-imported,
/// or declared with the weak or weak-ref attr.
bool isWeak() const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstValue && K <= lastValue; }
};
/// A struct with extended info about a syntactic
/// name qualifier, to be used for the case of out-of-line declarations.
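/// For example, in
/// \code
///   template<typename T> struct A { void f(); };
///   template<typename T> void A<T>::f() {}
/// \endcode
/// the out-of-line definition of \c f is written with the qualifier
/// \c A<T>:: and one "outer" template parameter list.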
struct QualifierInfo {
NestedNameSpecifierLoc QualifierLoc;
/// The number of "outer" template parameter lists.
/// The count includes all of the template parameter lists that were matched
/// against the template-ids occurring in the NNS and possibly (in the
/// case of an explicit specialization) a final "template <>".
unsigned NumTemplParamLists = 0;
/// A newly-allocated array of size NumTemplParamLists,
/// containing pointers to the "outer" template parameter lists.
/// It includes all of the template parameter lists that were matched
/// against the template-ids occurring in the NNS and possibly (in the
/// case of an explicit specialization) a final "template <>".
TemplateParameterList** TemplParamLists = nullptr;
QualifierInfo() = default;
QualifierInfo(const QualifierInfo &) = delete;
QualifierInfo& operator=(const QualifierInfo &) = delete;
/// Sets info about "outer" template parameter lists.
void setTemplateParameterListsInfo(ASTContext &Context,
ArrayRef<TemplateParameterList *> TPLists);
};
/// Represents a ValueDecl that came out of a declarator.
/// Contains type source information through TypeSourceInfo.
class DeclaratorDecl : public ValueDecl {
// A struct representing both a TInfo and a syntactic qualifier,
// to be used for the (uncommon) case of out-of-line declarations.
struct ExtInfo : public QualifierInfo {
TypeSourceInfo *TInfo;
};
llvm::PointerUnion<TypeSourceInfo *, ExtInfo *> DeclInfo;
/// The start of the source range for this declaration,
/// ignoring outer template declarations.
SourceLocation InnerLocStart;
bool hasExtInfo() const { return DeclInfo.is<ExtInfo*>(); }
ExtInfo *getExtInfo() { return DeclInfo.get<ExtInfo*>(); }
const ExtInfo *getExtInfo() const { return DeclInfo.get<ExtInfo*>(); }
protected:
DeclaratorDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName N, QualType T, TypeSourceInfo *TInfo,
SourceLocation StartL)
: ValueDecl(DK, DC, L, N, T), DeclInfo(TInfo), InnerLocStart(StartL) {}
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
TypeSourceInfo *getTypeSourceInfo() const {
return hasExtInfo()
? getExtInfo()->TInfo
: DeclInfo.get<TypeSourceInfo*>();
}
void setTypeSourceInfo(TypeSourceInfo *TI) {
if (hasExtInfo())
getExtInfo()->TInfo = TI;
else
DeclInfo = TI;
}
/// Return start of source range ignoring outer template declarations.
SourceLocation getInnerLocStart() const { return InnerLocStart; }
void setInnerLocStart(SourceLocation L) { InnerLocStart = L; }
/// Return start of source range taking into account any outer template
/// declarations.
SourceLocation getOuterLocStart() const;
SourceRange getSourceRange() const override LLVM_READONLY;
SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); }
SourceLocation getBeginLoc() const LLVM_READONLY {
return getOuterLocStart();
}
/// Retrieve the nested-name-specifier that qualifies the name of this
/// declaration, if it was present in the source.
NestedNameSpecifier *getQualifier() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
: nullptr;
}
/// Retrieve the nested-name-specifier (with source-location
/// information) that qualifies the name of this declaration, if it was
/// present in the source.
NestedNameSpecifierLoc getQualifierLoc() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc
: NestedNameSpecifierLoc();
}
void setQualifierInfo(NestedNameSpecifierLoc QualifierLoc);
unsigned getNumTemplateParameterLists() const {
return hasExtInfo() ? getExtInfo()->NumTemplParamLists : 0;
}
TemplateParameterList *getTemplateParameterList(unsigned index) const {
assert(index < getNumTemplateParameterLists());
return getExtInfo()->TemplParamLists[index];
}
void setTemplateParameterListsInfo(ASTContext &Context,
ArrayRef<TemplateParameterList *> TPLists);
SourceLocation getTypeSpecStartLoc() const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
return K >= firstDeclarator && K <= lastDeclarator;
}
};
/// Structure used to store a statement, the constant value to
/// which it was evaluated (if any), and whether or not the statement
/// is an integral constant expression (if known).
struct EvaluatedStmt {
/// Whether this statement was already evaluated.
bool WasEvaluated : 1;
/// Whether this statement is being evaluated.
bool IsEvaluating : 1;
/// Whether we already checked whether this statement was an
/// integral constant expression.
bool CheckedICE : 1;
/// Whether we are checking whether this statement is an
/// integral constant expression.
bool CheckingICE : 1;
/// Whether this statement is an integral constant expression,
/// or in C++11, whether the statement is a constant expression. Only
/// valid if CheckedICE is true.
bool IsICE : 1;
Stmt *Value;
APValue Evaluated;
EvaluatedStmt() : WasEvaluated(false), IsEvaluating(false), CheckedICE(false),
CheckingICE(false), IsICE(false) {}
};
/// Represents a variable declaration or definition.
class VarDecl : public DeclaratorDecl, public Redeclarable<VarDecl> {
public:
/// Initialization styles.
enum InitializationStyle {
/// C-style initialization with assignment
CInit,
/// Call-style initialization (C++98)
CallInit,
/// Direct list-initialization (C++11)
ListInit
};
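// For example, "int a = 1;" uses CInit, "int b(1);" uses CallInit, and
// "int c{1};" uses ListInit.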
/// Kinds of thread-local storage.
enum TLSKind {
/// Not a TLS variable.
TLS_None,
/// TLS with a known-constant initializer.
TLS_Static,
/// TLS with a dynamic initializer.
TLS_Dynamic
};
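// For example, "thread_local int x;" has a constant (zero) initializer and
// is TLS_Static, while "thread_local int y = f();" requires a dynamic
// initializer and is TLS_Dynamic.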
/// Return the string used to specify the storage class \p SC.
///
/// It is illegal to call this function with SC == None.
static const char *getStorageClassSpecifierString(StorageClass SC);
protected:
// A pointer union of Stmt * and EvaluatedStmt *. When an EvaluatedStmt, we
// have allocated the auxiliary struct of information there.
//
// TODO: It is a bit unfortunate to use a PointerUnion inside the VarDecl for
// this as *many* VarDecls are ParmVarDecls that don't have default
// arguments. We could save some space by moving this pointer union to be
// allocated in trailing space when necessary.
using InitType = llvm::PointerUnion<Stmt *, EvaluatedStmt *>;
/// The initializer for this variable or, for a ParmVarDecl, the
/// C++ default argument.
mutable InitType Init;
private:
friend class ASTDeclReader;
friend class ASTNodeImporter;
friend class StmtIteratorBase;
class VarDeclBitfields {
friend class ASTDeclReader;
friend class VarDecl;
unsigned SClass : 3;
unsigned TSCSpec : 2;
unsigned InitStyle : 2;
};
enum { NumVarDeclBits = 7 };
protected:
enum { NumParameterIndexBits = 8 };
enum DefaultArgKind {
DAK_None,
DAK_Unparsed,
DAK_Uninstantiated,
DAK_Normal
};
class ParmVarDeclBitfields {
friend class ASTDeclReader;
friend class ParmVarDecl;
unsigned : NumVarDeclBits;
/// Whether this parameter inherits a default argument from a
/// prior declaration.
unsigned HasInheritedDefaultArg : 1;
/// Describes the kind of default argument for this parameter. By default
/// this is none. If this is normal, then the default argument is stored in
/// the \c VarDecl initializer expression unless we were unable to parse
/// (even an invalid) expression for the default argument.
unsigned DefaultArgKind : 2;
/// Whether this parameter undergoes K&R argument promotion.
unsigned IsKNRPromoted : 1;
/// Whether this parameter is an ObjC method parameter or not.
unsigned IsObjCMethodParam : 1;
/// If IsObjCMethodParam, a Decl::ObjCDeclQualifier.
/// Otherwise, the number of function parameter scopes enclosing
/// the function parameter scope in which this parameter was
/// declared.
unsigned ScopeDepthOrObjCQuals : 7;
/// The number of parameters preceding this parameter in the
/// function parameter scope in which it was declared.
unsigned ParameterIndex : NumParameterIndexBits;
};
class NonParmVarDeclBitfields {
friend class ASTDeclReader;
friend class ImplicitParamDecl;
friend class VarDecl;
unsigned : NumVarDeclBits;
// FIXME: We need something similar to CXXRecordDecl::DefinitionData.
/// Whether this variable is a definition which was demoted due to
/// module merge.
unsigned IsThisDeclarationADemotedDefinition : 1;
/// Whether this variable is the exception variable in a C++ catch
/// or an Objective-C @catch statement.
unsigned ExceptionVar : 1;
/// Whether this local variable could be allocated in the return
/// slot of its function, enabling the named return value optimization
/// (NRVO).
unsigned NRVOVariable : 1;
/// Whether this variable is the for-range-declaration in a C++0x
/// for-range statement.
unsigned CXXForRangeDecl : 1;
/// Whether this variable is the for-in loop declaration in Objective-C.
unsigned ObjCForDecl : 1;
/// Whether this variable is an ARC pseudo-__strong
/// variable; see isARCPseudoStrong() for details.
unsigned ARCPseudoStrong : 1;
/// Whether this variable is (C++1z) inline.
unsigned IsInline : 1;
/// Whether this variable has (C++1z) inline explicitly specified.
unsigned IsInlineSpecified : 1;
/// Whether this variable is (C++0x) constexpr.
unsigned IsConstexpr : 1;
/// Whether this variable is the implicit variable for a lambda
/// init-capture.
unsigned IsInitCapture : 1;
/// Whether this local extern variable's previous declaration was
/// declared in the same block scope. This controls whether we should merge
/// the type of this declaration with its previous declaration.
unsigned PreviousDeclInSameBlockScope : 1;
/// Defines the kind of the ImplicitParamDecl: 'this', 'self', 'vtt', '_cmd'
/// or something else.
unsigned ImplicitParamKind : 3;
};
union {
unsigned AllBits;
VarDeclBitfields VarDeclBits;
ParmVarDeclBitfields ParmVarDeclBits;
NonParmVarDeclBitfields NonParmVarDeclBits;
};
VarDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, StorageClass SC);
using redeclarable_base = Redeclarable<VarDecl>;
VarDecl *getNextRedeclarationImpl() override {
return getNextRedeclaration();
}
VarDecl *getPreviousDeclImpl() override {
return getPreviousDecl();
}
VarDecl *getMostRecentDeclImpl() override {
return getMostRecentDecl();
}
public:
using redecl_range = redeclarable_base::redecl_range;
using redecl_iterator = redeclarable_base::redecl_iterator;
using redeclarable_base::redecls_begin;
using redeclarable_base::redecls_end;
using redeclarable_base::redecls;
using redeclarable_base::getPreviousDecl;
using redeclarable_base::getMostRecentDecl;
using redeclarable_base::isFirstDecl;
static VarDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
StorageClass S);
static VarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
/// Returns the storage class as written in the source. For the
/// computed linkage of the symbol, see getLinkage.
StorageClass getStorageClass() const {
return (StorageClass) VarDeclBits.SClass;
}
void setStorageClass(StorageClass SC);
void setTSCSpec(ThreadStorageClassSpecifier TSC) {
VarDeclBits.TSCSpec = TSC;
assert(VarDeclBits.TSCSpec == TSC && "truncation");
}
ThreadStorageClassSpecifier getTSCSpec() const {
return static_cast<ThreadStorageClassSpecifier>(VarDeclBits.TSCSpec);
}
TLSKind getTLSKind() const;
/// Returns true if a variable with function scope is a non-static local
/// variable.
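///
/// A minimal illustration, assuming ordinary C++ (no OpenCL):
/// \code
/// int g;                // file scope: hasLocalStorage() == false
/// void f() {
///   int a;              // automatic: hasLocalStorage() == true
///   static int s;       // static local: hasLocalStorage() == false
///   thread_local int t; // thread storage: hasLocalStorage() == false
/// }
/// \endcode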
bool hasLocalStorage() const {
if (getStorageClass() == SC_None) {
// OpenCL v1.2 s6.5.3: The __constant or constant address space name is
// used to describe variables allocated in global memory and which are
// accessed inside a kernel(s) as read-only variables. As such, variables
// in constant address space cannot have local storage.
if (getType().getAddressSpace() == LangAS::opencl_constant)
return false;
// Second check is for C++11 [dcl.stc]p4.
return !isFileVarDecl() && getTSCSpec() == TSCS_unspecified;
}
// Global Named Register (GNU extension)
if (getStorageClass() == SC_Register && !isLocalVarDeclOrParm())
return false;
// Return true for: Auto, Register.
// Return false for: Extern, Static, PrivateExtern, OpenCLWorkGroupLocal.
return getStorageClass() >= SC_Auto;
}
/// Returns true if a variable with function scope is a static local
/// variable.
bool isStaticLocal() const {
return (getStorageClass() == SC_Static ||
// C++11 [dcl.stc]p4
(getStorageClass() == SC_None && getTSCSpec() == TSCS_thread_local))
&& !isFileVarDecl();
}
/// Returns true if a variable has extern or __private_extern__
/// storage.
bool hasExternalStorage() const {
return getStorageClass() == SC_Extern ||
getStorageClass() == SC_PrivateExtern;
}
/// Returns true for all variables that do not have local storage.
///
/// This includes all global variables as well as static variables declared
/// within a function.
bool hasGlobalStorage() const { return !hasLocalStorage(); }
/// Get the storage duration of this variable, per C++ [basic.stc].
StorageDuration getStorageDuration() const {
return hasLocalStorage() ? SD_Automatic :
getTSCSpec() ? SD_Thread : SD_Static;
}
/// Compute the language linkage.
LanguageLinkage getLanguageLinkage() const;
/// Determines whether this variable is a variable with external, C linkage.
bool isExternC() const;
/// Determines whether this variable's context is, or is nested within,
/// a C++ extern "C" linkage spec.
bool isInExternCContext() const;
/// Determines whether this variable's context is, or is nested within,
/// a C++ extern "C++" linkage spec.
bool isInExternCXXContext() const;
/// Returns true for local variable declarations other than parameters.
/// Note that this includes static variables inside of functions. It also
/// includes variables inside blocks.
///
/// void foo() { int x; static int y; extern int z; }
bool isLocalVarDecl() const {
if (getKind() != Decl::Var && getKind() != Decl::Decomposition)
return false;
if (const DeclContext *DC = getLexicalDeclContext())
return DC->getRedeclContext()->isFunctionOrMethod();
return false;
}
/// Similar to isLocalVarDecl but also includes parameters.
bool isLocalVarDeclOrParm() const {
return isLocalVarDecl() || getKind() == Decl::ParmVar;
}
/// Similar to isLocalVarDecl, but excludes variables declared in blocks.
bool isFunctionOrMethodVarDecl() const {
if (getKind() != Decl::Var && getKind() != Decl::Decomposition)
return false;
const DeclContext *DC = getLexicalDeclContext()->getRedeclContext();
return DC->isFunctionOrMethod() && DC->getDeclKind() != Decl::Block;
}
/// Determines whether this is a static data member.
///
/// This will only be true in C++, and applies to, e.g., the
/// variable 'x' in:
/// \code
/// struct S {
/// static int x;
/// };
/// \endcode
bool isStaticDataMember() const {
// If it wasn't static, it would be a FieldDecl.
return getKind() != Decl::ParmVar && getDeclContext()->isRecord();
}
VarDecl *getCanonicalDecl() override;
const VarDecl *getCanonicalDecl() const {
return const_cast<VarDecl*>(this)->getCanonicalDecl();
}
enum DefinitionKind {
/// This declaration is only a declaration.
DeclarationOnly,
/// This declaration is a tentative definition.
TentativeDefinition,
/// This declaration is definitely a definition.
Definition
};
/// Check whether this declaration is a definition. If this could be
/// a tentative definition (in C), don't check whether there's an overriding
/// definition.
DefinitionKind isThisDeclarationADefinition(ASTContext &) const;
DefinitionKind isThisDeclarationADefinition() const {
return isThisDeclarationADefinition(getASTContext());
}
/// Check whether this variable is defined in this translation unit.
DefinitionKind hasDefinition(ASTContext &) const;
DefinitionKind hasDefinition() const {
return hasDefinition(getASTContext());
}
/// Get the tentative definition that acts as the real definition in a TU.
/// Returns null if there is a proper definition available.
VarDecl *getActingDefinition();
const VarDecl *getActingDefinition() const {
return const_cast<VarDecl*>(this)->getActingDefinition();
}
/// Get the real (not just tentative) definition for this declaration.
VarDecl *getDefinition(ASTContext &);
const VarDecl *getDefinition(ASTContext &C) const {
return const_cast<VarDecl*>(this)->getDefinition(C);
}
VarDecl *getDefinition() {
return getDefinition(getASTContext());
}
const VarDecl *getDefinition() const {
return const_cast<VarDecl*>(this)->getDefinition();
}
/// Determine whether this is or was instantiated from an out-of-line
/// definition of a static data member.
bool isOutOfLine() const override;
/// Returns true for a file scoped variable declaration.
bool isFileVarDecl() const {
Kind K = getKind();
if (K == ParmVar || K == ImplicitParam)
return false;
if (getLexicalDeclContext()->getRedeclContext()->isFileContext())
return true;
if (isStaticDataMember())
return true;
return false;
}
/// Get the initializer for this variable, no matter which
/// declaration it is attached to.
const Expr *getAnyInitializer() const {
const VarDecl *D;
return getAnyInitializer(D);
}
/// Get the initializer for this variable, no matter which
/// declaration it is attached to. Also get that declaration.
const Expr *getAnyInitializer(const VarDecl *&D) const;
bool hasInit() const;
const Expr *getInit() const {
return const_cast<VarDecl *>(this)->getInit();
}
Expr *getInit();
/// Retrieve the address of the initializer expression.
Stmt **getInitAddress();
void setInit(Expr *I);
/// Determine whether this variable's value can be used in a
/// constant expression, according to the relevant language standard.
/// This only checks properties of the declaration, and does not check
/// whether the initializer is in fact a constant expression.
bool isUsableInConstantExpressions(ASTContext &C) const;
EvaluatedStmt *ensureEvaluatedStmt() const;
/// Attempt to evaluate the value of the initializer attached to this
/// declaration, and produce notes explaining why it cannot be evaluated or is
/// not a constant expression. Returns a pointer to the value if evaluation
/// succeeded, nullptr otherwise.
APValue *evaluateValue() const;
APValue *evaluateValue(SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
/// Return the already-evaluated value of this variable's
/// initializer, or NULL if the value is not yet known. Returns a pointer
/// to an untyped APValue if the value could not be evaluated.
APValue *getEvaluatedValue() const;
/// Determines whether it is already known whether the
/// initializer is an integral constant expression or not.
bool isInitKnownICE() const;
/// Determines whether the initializer is an integral constant
/// expression, or in C++11, whether the initializer is a constant
/// expression.
///
/// \pre isInitKnownICE()
bool isInitICE() const;
/// Determine whether the value of the initializer attached to this
/// declaration is an integral constant expression.
bool checkInitIsICE() const;
void setInitStyle(InitializationStyle Style) {
VarDeclBits.InitStyle = Style;
}
/// The style of initialization for this declaration.
///
/// C-style initialization is "int x = 1;". Call-style initialization is
/// a C++98 direct-initializer, e.g. "int x(1);". The Init expression will be
/// the expression inside the parens or a "ClassType(a,b,c)" class constructor
/// expression for class types. List-style initialization is C++11 syntax,
/// e.g. "int x{1};". Clients can distinguish between different forms of
/// initialization by checking this value. In particular, "int x = {1};" is
/// C-style, "int x({1})" is call-style, and "int x{1};" is list-style; the
/// Init expression in all three cases is an InitListExpr.
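///
/// A minimal sketch of the three styles:
/// \code
/// int a = 1; // CInit
/// int b(1);  // CallInit
/// int c{1};  // ListInit
/// \endcode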
InitializationStyle getInitStyle() const {
return static_cast<InitializationStyle>(VarDeclBits.InitStyle);
}
/// Whether the initializer is a direct-initializer (list or call).
bool isDirectInit() const {
return getInitStyle() != CInit;
}
/// Whether this definition should pretend to be a declaration.
bool isThisDeclarationADemotedDefinition() const {
return isa<ParmVarDecl>(this) ? false :
NonParmVarDeclBits.IsThisDeclarationADemotedDefinition;
}
/// This is a definition which should be demoted to a declaration.
///
/// In some cases (mostly module merging) we can end up with two visible
/// definitions, one of which needs to be demoted to a declaration to keep
/// the AST invariants.
void demoteThisDefinitionToDeclaration() {
assert(isThisDeclarationADefinition() && "Not a definition!");
assert(!isa<ParmVarDecl>(this) && "Cannot demote ParmVarDecls!");
NonParmVarDeclBits.IsThisDeclarationADemotedDefinition = 1;
}
/// Determine whether this variable is the exception variable in a
/// C++ catch statement or an Objective-C \@catch statement.
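///
/// For illustration, 'e' below is the exception variable:
/// \code
/// try { /* ... */ } catch (const std::exception &e) { /* ... */ }
/// \endcode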
bool isExceptionVariable() const {
return isa<ParmVarDecl>(this) ? false : NonParmVarDeclBits.ExceptionVar;
}
void setExceptionVariable(bool EV) {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.ExceptionVar = EV;
}
/// Determine whether this local variable can be used with the named
/// return value optimization (NRVO).
///
/// The named return value optimization (NRVO) works by marking certain
/// non-volatile local variables of class type as NRVO objects. These
/// locals can be allocated within the return slot of their containing
/// function, in which case there is no need to copy the object to the
/// return slot when returning from the function. Within the function body,
/// each return that returns the NRVO object will have this variable as its
/// NRVO candidate.
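///
/// A minimal sketch of an NRVO candidate:
/// \code
/// std::string make() {
///   std::string s = "hello"; // 's' may be marked as the NRVO variable
///   return s;                // 's' can be built directly in the return slot
/// }
/// \endcode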
bool isNRVOVariable() const {
return isa<ParmVarDecl>(this) ? false : NonParmVarDeclBits.NRVOVariable;
}
void setNRVOVariable(bool NRVO) {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.NRVOVariable = NRVO;
}
/// Determine whether this variable is the for-range-declaration in
/// a C++11 for-range statement.
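///
/// For illustration (assuming 'xs' is some range), 'x' below is the
/// for-range-declaration:
/// \code
/// for (int x : xs) { /* ... */ }
/// \endcode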
bool isCXXForRangeDecl() const {
return isa<ParmVarDecl>(this) ? false : NonParmVarDeclBits.CXXForRangeDecl;
}
void setCXXForRangeDecl(bool FRD) {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.CXXForRangeDecl = FRD;
}
/// Determine whether this variable is a for-loop declaration for a
/// for-in statement in Objective-C.
bool isObjCForDecl() const {
return NonParmVarDeclBits.ObjCForDecl;
}
void setObjCForDecl(bool FRD) {
NonParmVarDeclBits.ObjCForDecl = FRD;
}
/// Determine whether this variable is an ARC pseudo-__strong
/// variable. A pseudo-__strong variable has a __strong-qualified
/// type but does not actually retain the object written into it.
/// Generally such variables are also 'const' for safety.
bool isARCPseudoStrong() const {
return isa<ParmVarDecl>(this) ? false : NonParmVarDeclBits.ARCPseudoStrong;
}
void setARCPseudoStrong(bool ps) {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.ARCPseudoStrong = ps;
}
/// Whether this variable is (C++17) inline.
bool isInline() const {
return isa<ParmVarDecl>(this) ? false : NonParmVarDeclBits.IsInline;
}
bool isInlineSpecified() const {
return isa<ParmVarDecl>(this) ? false
: NonParmVarDeclBits.IsInlineSpecified;
}
void setInlineSpecified() {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.IsInline = true;
NonParmVarDeclBits.IsInlineSpecified = true;
}
void setImplicitlyInline() {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.IsInline = true;
}
/// Whether this variable is (C++11) constexpr.
bool isConstexpr() const {
return isa<ParmVarDecl>(this) ? false : NonParmVarDeclBits.IsConstexpr;
}
void setConstexpr(bool IC) {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.IsConstexpr = IC;
}
/// Whether this variable is the implicit variable for a lambda init-capture.
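///
/// For illustration, 'y' below is the init-capture variable ('compute' is a
/// hypothetical function):
/// \code
/// auto l = [y = compute()] { return y; };
/// \endcode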
bool isInitCapture() const {
return isa<ParmVarDecl>(this) ? false : NonParmVarDeclBits.IsInitCapture;
}
void setInitCapture(bool IC) {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.IsInitCapture = IC;
}
/// Whether this local extern variable declaration's previous declaration
/// was declared in the same block scope. Only correct in C++.
bool isPreviousDeclInSameBlockScope() const {
return isa<ParmVarDecl>(this)
? false
: NonParmVarDeclBits.PreviousDeclInSameBlockScope;
}
void setPreviousDeclInSameBlockScope(bool Same) {
assert(!isa<ParmVarDecl>(this));
NonParmVarDeclBits.PreviousDeclInSameBlockScope = Same;
}
/// Retrieve the variable declaration from which this variable could
/// be instantiated, if it is an instantiation (rather than a non-template).
VarDecl *getTemplateInstantiationPattern() const;
/// If this variable is an instantiated static data member of a
/// class template specialization, returns the templated static data member
/// from which it was instantiated.
VarDecl *getInstantiatedFromStaticDataMember() const;
/// If this variable is an instantiation of a variable template or a
/// static data member of a class template, determine what kind of
/// template specialization or instantiation this is.
TemplateSpecializationKind getTemplateSpecializationKind() const;
/// If this variable is an instantiation of a variable template or a
/// static data member of a class template, determine its point of
/// instantiation.
SourceLocation getPointOfInstantiation() const;
/// If this variable is an instantiation of a static data member of a
/// class template specialization, retrieves the member specialization
/// information.
MemberSpecializationInfo *getMemberSpecializationInfo() const;
/// For a static data member that was instantiated from a static
/// data member of a class template, set the template specialization kind.
void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation = SourceLocation());
/// Specify that this variable is an instantiation of the
/// static data member VD.
void setInstantiationOfStaticDataMember(VarDecl *VD,
TemplateSpecializationKind TSK);
/// Retrieves the variable template that is described by this
/// variable declaration.
///
/// Every variable template is represented as a VarTemplateDecl and a
/// VarDecl. The former contains template properties (such as
/// the template parameter lists) while the latter contains the
/// actual description of the template's
/// contents. VarTemplateDecl::getTemplatedDecl() retrieves the
/// VarDecl that describes the variable template, while
/// getDescribedVarTemplate() retrieves the VarTemplateDecl from
/// a VarDecl.
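///
/// For illustration, a C++14 variable template, which is represented by a
/// VarTemplateDecl plus a VarDecl:
/// \code
/// template<typename T> T pi = T(3.1415926);
/// \endcode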
VarTemplateDecl *getDescribedVarTemplate() const;
void setDescribedVarTemplate(VarTemplateDecl *Template);
// Is this variable known to have a definition somewhere in the complete
// program? This may be true even if the declaration has internal linkage and
// has no definition within this source file.
bool isKnownToBeDefined() const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstVar && K <= lastVar; }
};
class ImplicitParamDecl : public VarDecl {
void anchor() override;
public:
/// Defines the kind of the implicit parameter: a pointer to 'this', 'self',
/// '_cmd', a virtual table pointer, a captured context, or something else.
enum ImplicitParamKind : unsigned {
/// Parameter for Objective-C 'self' argument
ObjCSelf,
/// Parameter for Objective-C '_cmd' argument
ObjCCmd,
/// Parameter for C++ 'this' argument
CXXThis,
/// Parameter for C++ virtual table pointers
CXXVTT,
/// Parameter for captured context
CapturedContext,
/// Other implicit parameter
Other,
};
/// Create implicit parameter.
static ImplicitParamDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, ImplicitParamKind ParamKind);
static ImplicitParamDecl *Create(ASTContext &C, QualType T,
ImplicitParamKind ParamKind);
static ImplicitParamDecl *CreateDeserialized(ASTContext &C, unsigned ID);
ImplicitParamDecl(ASTContext &C, DeclContext *DC, SourceLocation IdLoc,
IdentifierInfo *Id, QualType Type,
ImplicitParamKind ParamKind)
: VarDecl(ImplicitParam, C, DC, IdLoc, IdLoc, Id, Type,
/*TInfo=*/nullptr, SC_None) {
NonParmVarDeclBits.ImplicitParamKind = ParamKind;
setImplicit();
}
ImplicitParamDecl(ASTContext &C, QualType Type, ImplicitParamKind ParamKind)
: VarDecl(ImplicitParam, C, /*DC=*/nullptr, SourceLocation(),
SourceLocation(), /*Id=*/nullptr, Type,
/*TInfo=*/nullptr, SC_None) {
NonParmVarDeclBits.ImplicitParamKind = ParamKind;
setImplicit();
}
/// Returns the implicit parameter kind.
ImplicitParamKind getParameterKind() const {
return static_cast<ImplicitParamKind>(NonParmVarDeclBits.ImplicitParamKind);
}
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ImplicitParam; }
};
/// Represents a parameter to a function.
class ParmVarDecl : public VarDecl {
public:
enum { MaxFunctionScopeDepth = 255 };
enum { MaxFunctionScopeIndex = 255 };
protected:
ParmVarDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, StorageClass S, Expr *DefArg)
: VarDecl(DK, C, DC, StartLoc, IdLoc, Id, T, TInfo, S) {
assert(ParmVarDeclBits.HasInheritedDefaultArg == false);
assert(ParmVarDeclBits.DefaultArgKind == DAK_None);
assert(ParmVarDeclBits.IsKNRPromoted == false);
assert(ParmVarDeclBits.IsObjCMethodParam == false);
setDefaultArg(DefArg);
}
public:
static ParmVarDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo,
StorageClass S, Expr *DefArg);
static ParmVarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
void setObjCMethodScopeInfo(unsigned parameterIndex) {
ParmVarDeclBits.IsObjCMethodParam = true;
setParameterIndex(parameterIndex);
}
void setScopeInfo(unsigned scopeDepth, unsigned parameterIndex) {
assert(!ParmVarDeclBits.IsObjCMethodParam);
ParmVarDeclBits.ScopeDepthOrObjCQuals = scopeDepth;
assert(ParmVarDeclBits.ScopeDepthOrObjCQuals == scopeDepth
&& "truncation!");
setParameterIndex(parameterIndex);
}
bool isObjCMethodParameter() const {
return ParmVarDeclBits.IsObjCMethodParam;
}
unsigned getFunctionScopeDepth() const {
if (ParmVarDeclBits.IsObjCMethodParam) return 0;
return ParmVarDeclBits.ScopeDepthOrObjCQuals;
}
/// Returns the index of this parameter in its prototype or method scope.
unsigned getFunctionScopeIndex() const {
return getParameterIndex();
}
ObjCDeclQualifier getObjCDeclQualifier() const {
if (!ParmVarDeclBits.IsObjCMethodParam) return OBJC_TQ_None;
return ObjCDeclQualifier(ParmVarDeclBits.ScopeDepthOrObjCQuals);
}
void setObjCDeclQualifier(ObjCDeclQualifier QTVal) {
assert(ParmVarDeclBits.IsObjCMethodParam);
ParmVarDeclBits.ScopeDepthOrObjCQuals = QTVal;
}
/// True if the value passed to this parameter must undergo
/// K&R-style default argument promotion:
///
/// C99 6.5.2.2.
/// If the expression that denotes the called function has a type
/// that does not include a prototype, the integer promotions are
/// performed on each argument, and arguments that have type float
/// are promoted to double.
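///
/// A minimal C sketch:
/// \code
/// void f();                      // no prototype (C)
/// void g(float x) { f(x, 'c'); } // 'x' promoted to double, 'c' to int
/// \endcode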
bool isKNRPromoted() const {
return ParmVarDeclBits.IsKNRPromoted;
}
void setKNRPromoted(bool promoted) {
ParmVarDeclBits.IsKNRPromoted = promoted;
}
Expr *getDefaultArg();
const Expr *getDefaultArg() const {
return const_cast<ParmVarDecl *>(this)->getDefaultArg();
}
void setDefaultArg(Expr *defarg);
/// Retrieve the source range that covers the entire default
/// argument.
SourceRange getDefaultArgRange() const;
void setUninstantiatedDefaultArg(Expr *arg);
Expr *getUninstantiatedDefaultArg();
const Expr *getUninstantiatedDefaultArg() const {
return const_cast<ParmVarDecl *>(this)->getUninstantiatedDefaultArg();
}
/// Determines whether this parameter has a default argument,
/// either parsed or not.
bool hasDefaultArg() const;
/// Determines whether this parameter has a default argument that has not
/// yet been parsed. This will occur during the processing of a C++ class
/// whose member functions have default arguments, e.g.,
/// @code
/// class X {
/// public:
/// void f(int x = 17); // x has an unparsed default argument now
/// }; // x has a regular default argument now
/// @endcode
bool hasUnparsedDefaultArg() const {
return ParmVarDeclBits.DefaultArgKind == DAK_Unparsed;
}
bool hasUninstantiatedDefaultArg() const {
return ParmVarDeclBits.DefaultArgKind == DAK_Uninstantiated;
}
/// Specify that this parameter has an unparsed default argument.
/// The argument will be replaced with a real default argument via
/// setDefaultArg when the class definition enclosing the function
/// declaration that owns this default argument is completed.
void setUnparsedDefaultArg() {
ParmVarDeclBits.DefaultArgKind = DAK_Unparsed;
}
bool hasInheritedDefaultArg() const {
return ParmVarDeclBits.HasInheritedDefaultArg;
}
void setHasInheritedDefaultArg(bool I = true) {
ParmVarDeclBits.HasInheritedDefaultArg = I;
}
QualType getOriginalType() const;
/// Determine whether this parameter is actually a function
/// parameter pack.
bool isParameterPack() const;
/// Sets the function declaration that owns this
/// ParmVarDecl. Since ParmVarDecls are often created before the
/// FunctionDecls that own them, this routine is required to update
/// the DeclContext appropriately.
void setOwningFunction(DeclContext *FD) { setDeclContext(FD); }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == ParmVar; }
private:
enum { ParameterIndexSentinel = (1 << NumParameterIndexBits) - 1 };
void setParameterIndex(unsigned parameterIndex) {
if (parameterIndex >= ParameterIndexSentinel) {
setParameterIndexLarge(parameterIndex);
return;
}
ParmVarDeclBits.ParameterIndex = parameterIndex;
assert(ParmVarDeclBits.ParameterIndex == parameterIndex && "truncation!");
}
unsigned getParameterIndex() const {
unsigned d = ParmVarDeclBits.ParameterIndex;
return d == ParameterIndexSentinel ? getParameterIndexLarge() : d;
}
void setParameterIndexLarge(unsigned parameterIndex);
unsigned getParameterIndexLarge() const;
};
/// Represents a function declaration or definition.
///
/// Since a given function can be declared several times in a program,
/// there may be several FunctionDecls that correspond to that
/// function. Only one of those FunctionDecls will be found when
/// traversing the list of declarations in the context of the
/// FunctionDecl (e.g., the translation unit); this FunctionDecl
/// contains all of the information known about the function. Other,
/// previous declarations of the function are available via the
/// getPreviousDecl() chain.
class FunctionDecl : public DeclaratorDecl, public DeclContext,
public Redeclarable<FunctionDecl> {
public:
/// The kind of templated function a FunctionDecl can be.
enum TemplatedKind {
TK_NonTemplate,
TK_FunctionTemplate,
TK_MemberSpecialization,
TK_FunctionTemplateSpecialization,
TK_DependentFunctionTemplateSpecialization
};
private:
/// A new[]'d array of pointers to VarDecls for the formal
/// parameters of this function. This is null if the function has no prototype
/// or no formal parameters.
ParmVarDecl **ParamInfo = nullptr;
LazyDeclStmtPtr Body;
// FIXME: This can be packed into the bitfields in DeclContext.
// NOTE: VC++ packs bitfields poorly if the types differ.
unsigned SClass : 3;
unsigned IsInline : 1;
unsigned IsInlineSpecified : 1;
protected:
// This is shared by CXXConstructorDecl, CXXConversionDecl, and
// CXXDeductionGuideDecl.
unsigned IsExplicitSpecified : 1;
private:
unsigned IsVirtualAsWritten : 1;
unsigned IsPure : 1;
unsigned HasInheritedPrototype : 1;
unsigned HasWrittenPrototype : 1;
unsigned IsDeleted : 1;
unsigned IsTrivial : 1; // sunk from CXXMethodDecl
/// This flag indicates whether this function is trivial for the purpose of
/// calls. This is meaningful only when this function is a copy/move
/// constructor or a destructor.
unsigned IsTrivialForCall : 1;
unsigned IsDefaulted : 1; // sunk from CXXMethodDecl
unsigned IsExplicitlyDefaulted : 1; // sunk from CXXMethodDecl
unsigned HasImplicitReturnZero : 1;
unsigned IsLateTemplateParsed : 1;
unsigned IsConstexpr : 1;
unsigned InstantiationIsPending : 1;
/// Indicates if the function uses __try.
unsigned UsesSEHTry : 1;
/// Indicates if the function was a definition but its body was
/// skipped.
unsigned HasSkippedBody : 1;
/// Indicates if the function declaration will have a body, once we're done
/// parsing it.
unsigned WillHaveBody : 1;
/// Indicates that this function is a multiversioned function using attribute
/// 'target'.
unsigned IsMultiVersion : 1;
protected:
/// [C++17] Only used by CXXDeductionGuideDecl. Declared here to avoid
/// increasing the size of CXXDeductionGuideDecl by the size of an unsigned
/// int as opposed to adding a single bit to FunctionDecl.
/// Indicates that the Deduction Guide is the implicitly generated 'copy
/// deduction candidate' (is used during overload resolution).
unsigned IsCopyDeductionCandidate : 1;
private:
/// Store the ODRHash after first calculation.
unsigned HasODRHash : 1;
unsigned ODRHash;
/// End part of this FunctionDecl's source range.
///
/// We could compute the full range in getSourceRange(). However, when we're
/// dealing with a function definition deserialized from a PCH/AST file,
/// we can only compute the full range once the function body has been
/// de-serialized, so it's far better to have the (sometimes-redundant)
/// EndRangeLoc.
SourceLocation EndRangeLoc;
/// The template or declaration that this declaration
/// describes or was instantiated from, respectively.
///
/// For non-templates, this value will be NULL. For function
/// declarations that describe a function template, this will be a
/// pointer to a FunctionTemplateDecl. For member functions
/// of class template specializations, this will be a MemberSpecializationInfo
/// pointer containing information about the specialization.
/// For function template specializations, this will be a
/// FunctionTemplateSpecializationInfo, which contains information about
/// the template being specialized and the template arguments involved in
/// that specialization.
llvm::PointerUnion4<FunctionTemplateDecl *,
MemberSpecializationInfo *,
FunctionTemplateSpecializationInfo *,
DependentFunctionTemplateSpecializationInfo *>
TemplateOrSpecialization;
/// Provides source/type location info for the declaration name embedded in
/// the DeclaratorDecl base class.
DeclarationNameLoc DNLoc;
/// Specify that this function declaration is actually a function
/// template specialization.
///
/// \param C the ASTContext.
///
/// \param Template the function template that this function template
/// specialization specializes.
///
/// \param TemplateArgs the template arguments that produced this
/// function template specialization from the template.
///
/// \param InsertPos If non-NULL, the position in the function template
/// specialization set where the function template specialization data will
/// be inserted.
///
/// \param TSK the kind of template specialization this is.
///
/// \param TemplateArgsAsWritten location info of template arguments.
///
/// \param PointOfInstantiation point at which the function template
/// specialization was first instantiated.
void setFunctionTemplateSpecialization(ASTContext &C,
FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
TemplateSpecializationKind TSK,
const TemplateArgumentListInfo *TemplateArgsAsWritten,
SourceLocation PointOfInstantiation);
/// Specify that this function is an instantiation of the
/// member function FD.
void setInstantiationOfMemberFunction(ASTContext &C, FunctionDecl *FD,
TemplateSpecializationKind TSK);
void setParams(ASTContext &C, ArrayRef<ParmVarDecl *> NewParamInfo);
protected:
FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo, QualType T,
TypeSourceInfo *TInfo, StorageClass S, bool isInlineSpecified,
bool isConstexprSpecified)
: DeclaratorDecl(DK, DC, NameInfo.getLoc(), NameInfo.getName(), T, TInfo,
StartLoc),
DeclContext(DK), redeclarable_base(C), SClass(S),
IsInline(isInlineSpecified), IsInlineSpecified(isInlineSpecified),
IsExplicitSpecified(false), IsVirtualAsWritten(false), IsPure(false),
HasInheritedPrototype(false), HasWrittenPrototype(true),
IsDeleted(false), IsTrivial(false), IsTrivialForCall(false),
IsDefaulted(false),
IsExplicitlyDefaulted(false), HasImplicitReturnZero(false),
IsLateTemplateParsed(false), IsConstexpr(isConstexprSpecified),
InstantiationIsPending(false), UsesSEHTry(false), HasSkippedBody(false),
WillHaveBody(false), IsMultiVersion(false),
IsCopyDeductionCandidate(false), HasODRHash(false), ODRHash(0),
EndRangeLoc(NameInfo.getEndLoc()), DNLoc(NameInfo.getInfo()) {}
using redeclarable_base = Redeclarable<FunctionDecl>;
FunctionDecl *getNextRedeclarationImpl() override {
return getNextRedeclaration();
}
FunctionDecl *getPreviousDeclImpl() override {
return getPreviousDecl();
}
FunctionDecl *getMostRecentDeclImpl() override {
return getMostRecentDecl();
}
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
using redecl_range = redeclarable_base::redecl_range;
using redecl_iterator = redeclarable_base::redecl_iterator;
using redeclarable_base::redecls_begin;
using redeclarable_base::redecls_end;
using redeclarable_base::redecls;
using redeclarable_base::getPreviousDecl;
using redeclarable_base::getMostRecentDecl;
using redeclarable_base::isFirstDecl;
static FunctionDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation NLoc,
DeclarationName N, QualType T,
TypeSourceInfo *TInfo,
StorageClass SC,
bool isInlineSpecified = false,
bool hasWrittenPrototype = true,
bool isConstexprSpecified = false) {
DeclarationNameInfo NameInfo(N, NLoc);
return FunctionDecl::Create(C, DC, StartLoc, NameInfo, T, TInfo,
SC,
isInlineSpecified, hasWrittenPrototype,
isConstexprSpecified);
}
static FunctionDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc,
const DeclarationNameInfo &NameInfo,
QualType T, TypeSourceInfo *TInfo,
StorageClass SC,
bool isInlineSpecified,
bool hasWrittenPrototype,
bool isConstexprSpecified = false);
static FunctionDecl *CreateDeserialized(ASTContext &C, unsigned ID);
DeclarationNameInfo getNameInfo() const {
return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
}
void getNameForDiagnostic(raw_ostream &OS, const PrintingPolicy &Policy,
bool Qualified) const override;
void setRangeEnd(SourceLocation E) { EndRangeLoc = E; }
SourceRange getSourceRange() const override LLVM_READONLY;
// Function definitions.
//
// A function declaration may be:
// - a non-defining declaration,
// - a definition. A function may be defined because:
// - it has a body, or will have one in the case of late parsing.
// - it has an uninstantiated body. The body does not exist because the
// function is not used yet, but the declaration is considered a
// definition and does not allow another definition of this function.
// - it does not have a user-specified body, but it does not allow
// redefinition, because it is deleted/defaulted or is defined through
// some other mechanism (alias, ifunc).
/// Returns true if the function has a body.
///
/// The function body might be in any of the (re-)declarations of this
/// function. The variant that accepts a FunctionDecl pointer will set that
/// function declaration to the actual declaration containing the body (if
/// there is one).
bool hasBody(const FunctionDecl *&Definition) const;
bool hasBody() const override {
const FunctionDecl* Definition;
return hasBody(Definition);
}
/// Returns whether the function has a trivial body that does not require any
/// specific codegen.
bool hasTrivialBody() const;
/// Returns true if the function has a definition that does not need to be
/// instantiated.
///
/// The variant that accepts a FunctionDecl pointer will set that function
/// declaration to the declaration that is a definition (if there is one).
bool isDefined(const FunctionDecl *&Definition) const;
virtual bool isDefined() const {
const FunctionDecl* Definition;
return isDefined(Definition);
}
/// Get the definition for this declaration.
FunctionDecl *getDefinition() {
const FunctionDecl *Definition;
if (isDefined(Definition))
return const_cast<FunctionDecl *>(Definition);
return nullptr;
}
const FunctionDecl *getDefinition() const {
return const_cast<FunctionDecl *>(this)->getDefinition();
}
/// Retrieve the body (definition) of the function. The function body might be
/// in any of the (re-)declarations of this function. The variant that accepts
/// a FunctionDecl pointer will set that function declaration to the actual
/// declaration containing the body (if there is one).
/// NOTE: For checking if there is a body, use hasBody() instead, to avoid
/// unnecessary AST de-serialization of the body.
Stmt *getBody(const FunctionDecl *&Definition) const;
Stmt *getBody() const override {
const FunctionDecl* Definition;
return getBody(Definition);
}
/// Returns whether this specific declaration of the function is also a
/// definition that does not contain an uninstantiated body.
///
/// This does not determine whether the function has been defined (e.g., in a
/// previous definition); for that information, use isDefined.
bool isThisDeclarationADefinition() const {
return IsDeleted || IsDefaulted || Body || HasSkippedBody ||
IsLateTemplateParsed || WillHaveBody || hasDefiningAttr();
}
/// Returns whether this specific declaration of the function has a body.
bool doesThisDeclarationHaveABody() const {
return Body || IsLateTemplateParsed;
}
void setBody(Stmt *B);
void setLazyBody(uint64_t Offset) { Body = Offset; }
/// Whether this function is variadic.
bool isVariadic() const;
/// Whether this function is marked as virtual explicitly.
bool isVirtualAsWritten() const { return IsVirtualAsWritten; }
void setVirtualAsWritten(bool V) { IsVirtualAsWritten = V; }
/// Whether this virtual function is pure, i.e. makes the containing class
/// abstract.
bool isPure() const { return IsPure; }
void setPure(bool P = true);
/// Whether this templated function will be late parsed.
bool isLateTemplateParsed() const { return IsLateTemplateParsed; }
void setLateTemplateParsed(bool ILT = true) { IsLateTemplateParsed = ILT; }
/// Whether this function is "trivial" in some specialized C++ senses.
/// Can only be true for default constructors, copy constructors,
/// copy assignment operators, and destructors. Not meaningful until
/// the class has been fully built by Sema.
bool isTrivial() const { return IsTrivial; }
void setTrivial(bool IT) { IsTrivial = IT; }
bool isTrivialForCall() const { return IsTrivialForCall; }
void setTrivialForCall(bool IT) { IsTrivialForCall = IT; }
/// Whether this function is defaulted per C++11. Only valid for
/// special member functions.
bool isDefaulted() const { return IsDefaulted; }
void setDefaulted(bool D = true) { IsDefaulted = D; }
/// Whether this function is explicitly defaulted per C++11. Only valid
/// for special member functions.
bool isExplicitlyDefaulted() const { return IsExplicitlyDefaulted; }
void setExplicitlyDefaulted(bool ED = true) { IsExplicitlyDefaulted = ED; }
/// Whether falling off this function implicitly returns null/zero.
/// If a more specific implicit return value is required, front-ends
/// should synthesize the appropriate return statements.
bool hasImplicitReturnZero() const { return HasImplicitReturnZero; }
void setHasImplicitReturnZero(bool IRZ) { HasImplicitReturnZero = IRZ; }
/// Whether this function has a prototype, either because one
/// was explicitly written or because it was "inherited" by merging
/// a declaration without a prototype with a declaration that has a
/// prototype.
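///
/// For illustration, in C:
/// \code
/// void f(int); // written prototype
/// void f();    // no written prototype; can inherit one when merged
/// \endcode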
bool hasPrototype() const {
return HasWrittenPrototype || HasInheritedPrototype;
}
bool hasWrittenPrototype() const { return HasWrittenPrototype; }
/// Whether this function inherited its prototype from a
/// previous declaration.
bool hasInheritedPrototype() const { return HasInheritedPrototype; }
void setHasInheritedPrototype(bool P = true) { HasInheritedPrototype = P; }
/// Whether this is a (C++11) constexpr function or constexpr constructor.
bool isConstexpr() const { return IsConstexpr; }
void setConstexpr(bool IC) { IsConstexpr = IC; }
/// Whether the instantiation of this function is pending.
/// This bit is set when the decision to instantiate this function is made
/// and unset if and when the function body is created. That leaves out
/// cases where instantiation did not happen because the template definition
/// was not seen in this TU. This bit remains set in those cases, under the
/// assumption that the instantiation will happen in some other TU.
bool instantiationIsPending() const { return InstantiationIsPending; }
void setInstantiationIsPending(bool IC) { InstantiationIsPending = IC; }
/// Indicates the function uses __try.
bool usesSEHTry() const { return UsesSEHTry; }
void setUsesSEHTry(bool UST) { UsesSEHTry = UST; }
/// Whether this function has been deleted.
///
/// A function that is "deleted" (via the C++0x "= delete" syntax)
/// acts like a normal function, except that it cannot actually be
/// called or have its address taken. Deleted functions are
/// typically used in C++ overload resolution to attract arguments
/// whose type or lvalue/rvalue-ness would permit the use of a
/// different overload that would behave incorrectly. For example,
/// one might use deleted functions to ban implicit conversion from
/// a floating-point number to an Integer type:
///
/// @code
/// struct Integer {
/// Integer(long); // construct from a long
/// Integer(double) = delete; // no construction from float or double
/// Integer(long double) = delete; // no construction from long double
/// };
/// @endcode
// If a function is deleted, its first declaration must be.
bool isDeleted() const { return getCanonicalDecl()->IsDeleted; }
bool isDeletedAsWritten() const { return IsDeleted && !IsDefaulted; }
void setDeletedAsWritten(bool D = true) { IsDeleted = D; }
/// Determines whether this function is "main", which is the
/// entry point into an executable program.
bool isMain() const;
/// Determines whether this function is a MSVCRT user defined entry
/// point.
bool isMSVCRTEntryPoint() const;
/// Determines whether this operator new or delete is one
/// of the reserved global placement operators:
/// void *operator new(size_t, void *);
/// void *operator new[](size_t, void *);
/// void operator delete(void *, void *);
/// void operator delete[](void *, void *);
/// These functions have special behavior under [new.delete.placement]:
/// These functions are reserved, a C++ program may not define
/// functions that displace the versions in the Standard C++ library.
/// The provisions of [basic.stc.dynamic] do not apply to these
/// reserved placement forms of operator new and operator delete.
///
/// This function must be an allocation or deallocation function.
bool isReservedGlobalPlacementOperator() const;
/// Determines whether this function is one of the replaceable
/// global allocation functions:
/// void *operator new(size_t);
/// void *operator new(size_t, const std::nothrow_t &) noexcept;
/// void *operator new[](size_t);
/// void *operator new[](size_t, const std::nothrow_t &) noexcept;
/// void operator delete(void *) noexcept;
/// void operator delete(void *, std::size_t) noexcept; [C++14]
/// void operator delete(void *, const std::nothrow_t &) noexcept;
/// void operator delete[](void *) noexcept;
/// void operator delete[](void *, std::size_t) noexcept; [C++14]
/// void operator delete[](void *, const std::nothrow_t &) noexcept;
/// These functions have special behavior under C++14 [expr.new]:
/// An implementation is allowed to omit a call to a replaceable global
/// allocation function. [...]
///
/// If this function is an aligned allocation/deallocation function, return
/// true through IsAligned.
bool isReplaceableGlobalAllocationFunction(bool *IsAligned = nullptr) const;
/// Determine whether this is a destroying operator delete.
bool isDestroyingOperatorDelete() const;
/// Compute the language linkage.
LanguageLinkage getLanguageLinkage() const;
/// Determines whether this function is a function with
/// external, C linkage.
bool isExternC() const;
/// Determines whether this function's context is, or is nested within,
/// a C++ extern "C" linkage spec.
bool isInExternCContext() const;
/// Determines whether this function's context is, or is nested within,
/// a C++ extern "C++" linkage spec.
bool isInExternCXXContext() const;
/// Determines whether this is a global function.
bool isGlobal() const;
/// Determines whether this function is known to be 'noreturn', through
/// an attribute on its declaration or its type.
bool isNoReturn() const;
/// True if the function was a definition but its body was skipped.
bool hasSkippedBody() const { return HasSkippedBody; }
void setHasSkippedBody(bool Skipped = true) { HasSkippedBody = Skipped; }
/// True if this function will eventually have a body, once it's fully parsed.
bool willHaveBody() const { return WillHaveBody; }
void setWillHaveBody(bool V = true) { WillHaveBody = V; }
/// True if this function is considered a multiversioned function.
bool isMultiVersion() const { return getCanonicalDecl()->IsMultiVersion; }
/// Sets the multiversion state for this declaration and all of its
/// redeclarations.
void setIsMultiVersion(bool V = true) {
getCanonicalDecl()->IsMultiVersion = V;
}
/// True if this function is a multiversioned dispatch function as a part of
/// the cpu_specific/cpu_dispatch functionality.
bool isCPUDispatchMultiVersion() const;
/// True if this function is a multiversioned processor specific function as a
/// part of the cpu_specific/cpu_dispatch functionality.
bool isCPUSpecificMultiVersion() const;
void setPreviousDeclaration(FunctionDecl * PrevDecl);
FunctionDecl *getCanonicalDecl() override;
const FunctionDecl *getCanonicalDecl() const {
return const_cast<FunctionDecl*>(this)->getCanonicalDecl();
}
unsigned getBuiltinID() const;
// ArrayRef interface to parameters.
ArrayRef<ParmVarDecl *> parameters() const {
return {ParamInfo, getNumParams()};
}
MutableArrayRef<ParmVarDecl *> parameters() {
return {ParamInfo, getNumParams()};
}
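// A typical traversal, for illustration (given some FunctionDecl *FD):
//   for (const ParmVarDecl *P : FD->parameters())
//     llvm::errs() << P->getName() << "\n";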
// Iterator access to formal parameters.
using param_iterator = MutableArrayRef<ParmVarDecl *>::iterator;
using param_const_iterator = ArrayRef<ParmVarDecl *>::const_iterator;
bool param_empty() const { return parameters().empty(); }
param_iterator param_begin() { return parameters().begin(); }
param_iterator param_end() { return parameters().end(); }
param_const_iterator param_begin() const { return parameters().begin(); }
param_const_iterator param_end() const { return parameters().end(); }
size_t param_size() const { return parameters().size(); }
/// Return the number of parameters this function must have based on its
/// FunctionType. This is the length of the ParamInfo array after it has been
/// created.
unsigned getNumParams() const;
const ParmVarDecl *getParamDecl(unsigned i) const {
assert(i < getNumParams() && "Illegal param #");
return ParamInfo[i];
}
ParmVarDecl *getParamDecl(unsigned i) {
assert(i < getNumParams() && "Illegal param #");
return ParamInfo[i];
}
void setParams(ArrayRef<ParmVarDecl *> NewParamInfo) {
setParams(getASTContext(), NewParamInfo);
}
/// Returns the minimum number of arguments needed to call this function. This
/// may be fewer than the number of function parameters, if some of the
/// parameters have default arguments (in C++).
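///
/// For illustration:
/// \code
/// void f(int a, int b = 0); // getMinRequiredArguments() == 1
/// \endcode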
unsigned getMinRequiredArguments() const;
QualType getReturnType() const {
- assert(getType()->getAs<FunctionType>() && "Expected a FunctionType!");
- return getType()->getAs<FunctionType>()->getReturnType();
+ return getType()->castAs<FunctionType>()->getReturnType();
}
/// Attempt to compute an informative source range covering the
/// function return type. This may omit qualifiers and other information with
/// limited representation in the AST.
SourceRange getReturnTypeSourceRange() const;
+ /// Get the declared return type, which may differ from the actual return
+ /// type if the return type is deduced.
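+ ///
+ /// For illustration:
+ /// \code
+ /// auto f() { return 0; } // declared return type 'auto',
+ ///                        // actual (deduced) return type 'int'
+ /// \endcode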
+ QualType getDeclaredReturnType() const {
+ auto *TSI = getTypeSourceInfo();
+ QualType T = TSI ? TSI->getType() : getType();
+ return T->castAs<FunctionType>()->getReturnType();
+ }
+
/// Attempt to compute an informative source range covering the
/// function exception specification, if any.
SourceRange getExceptionSpecSourceRange() const;
/// Determine the type of an expression that calls this function.
QualType getCallResultType() const {
- assert(getType()->getAs<FunctionType>() && "Expected a FunctionType!");
- return getType()->getAs<FunctionType>()->getCallResultType(getASTContext());
+ return getType()->castAs<FunctionType>()->getCallResultType(
+ getASTContext());
}
/// Returns the WarnUnusedResultAttr that is either declared on this
/// function, or its return type declaration.
const Attr *getUnusedResultAttr() const;
/// Returns true if this function or its return type has the
/// warn_unused_result attribute.
bool hasUnusedResultAttr() const { return getUnusedResultAttr() != nullptr; }
/// Returns the storage class as written in the source. For the
/// computed linkage of the symbol, see getLinkage.
StorageClass getStorageClass() const { return StorageClass(SClass); }
/// Determine whether the "inline" keyword was specified for this
/// function.
bool isInlineSpecified() const { return IsInlineSpecified; }
/// Set whether the "inline" keyword was specified for this function.
void setInlineSpecified(bool I) {
IsInlineSpecified = I;
IsInline = I;
}
/// Flag that this function is implicitly inline.
void setImplicitlyInline() {
IsInline = true;
}
/// Determine whether this function should be inlined, because it is
/// either marked "inline" or "constexpr" or is a member function of a class
/// that was defined in the class body.
bool isInlined() const { return IsInline; }
bool isInlineDefinitionExternallyVisible() const;
bool isMSExternInline() const;
bool doesDeclarationForceExternallyVisibleDefinition() const;
/// Whether this function declaration represents a C++ overloaded
/// operator, e.g., "operator+".
bool isOverloadedOperator() const {
return getOverloadedOperator() != OO_None;
}
OverloadedOperatorKind getOverloadedOperator() const;
const IdentifierInfo *getLiteralIdentifier() const;
/// If this function is an instantiation of a member function
/// of a class template specialization, retrieves the function from
/// which it was instantiated.
///
/// This routine will return non-NULL for (non-templated) member
/// functions of class templates and for instantiations of function
/// templates. For example, given:
///
/// \code
/// template<typename T>
/// struct X {
/// void f(T);
/// };
/// \endcode
///
/// The declaration for X<int>::f is a (non-templated) FunctionDecl
/// whose parent is the class template specialization X<int>. For
/// this declaration, getInstantiatedFromFunction() will return
/// the FunctionDecl X<T>::A. When a complete definition of
/// X<int>::A is required, it will be instantiated from the
/// declaration returned by getInstantiatedFromMemberFunction().
FunctionDecl *getInstantiatedFromMemberFunction() const;
/// What kind of templated function this is.
TemplatedKind getTemplatedKind() const;
/// If this function is an instantiation of a member function of a
/// class template specialization, retrieves the member specialization
/// information.
MemberSpecializationInfo *getMemberSpecializationInfo() const;
/// Specify that this function is an instantiation of the
/// member function FD.
void setInstantiationOfMemberFunction(FunctionDecl *FD,
TemplateSpecializationKind TSK) {
setInstantiationOfMemberFunction(getASTContext(), FD, TSK);
}
/// Retrieves the function template that is described by this
/// function declaration.
///
/// Every function template is represented as a FunctionTemplateDecl
/// and a FunctionDecl (or something derived from FunctionDecl). The
/// former contains template properties (such as the template
/// parameter lists) while the latter contains the actual
/// description of the template's
/// contents. FunctionTemplateDecl::getTemplatedDecl() retrieves the
/// FunctionDecl that describes the function template,
/// getDescribedFunctionTemplate() retrieves the
/// FunctionTemplateDecl from a FunctionDecl.
FunctionTemplateDecl *getDescribedFunctionTemplate() const;
void setDescribedFunctionTemplate(FunctionTemplateDecl *Template);
/// Determine whether this function is a function template
/// specialization.
bool isFunctionTemplateSpecialization() const {
return getPrimaryTemplate() != nullptr;
}
/// Retrieve the class scope template pattern that this function
/// template specialization is instantiated from.
FunctionDecl *getClassScopeSpecializationPattern() const;
/// If this function is actually a function template specialization,
/// retrieve information about this function template specialization.
/// Otherwise, returns NULL.
FunctionTemplateSpecializationInfo *getTemplateSpecializationInfo() const;
/// Determines whether this function is a function template
/// specialization or a member of a class template specialization that can
/// be implicitly instantiated.
bool isImplicitlyInstantiable() const;
/// Determines if the given function was instantiated from a
/// function template.
bool isTemplateInstantiation() const;
/// Retrieve the function declaration from which this function could
/// be instantiated, if it is an instantiation (rather than a non-template
/// or a specialization, for example).
FunctionDecl *getTemplateInstantiationPattern() const;
/// Retrieve the primary template that this function template
/// specialization either specializes or was instantiated from.
///
/// If this function declaration is not a function template specialization,
/// returns NULL.
FunctionTemplateDecl *getPrimaryTemplate() const;
/// Retrieve the template arguments used to produce this function
/// template specialization from the primary template.
///
/// If this function declaration is not a function template specialization,
/// returns NULL.
const TemplateArgumentList *getTemplateSpecializationArgs() const;
/// Retrieve the template argument list as written in the sources,
/// if any.
///
/// If this function declaration is not a function template specialization
/// or if it had no explicit template argument list, returns NULL.
/// Note that an explicit template argument list may be written empty,
/// e.g., template<> void foo<>(char* s);
const ASTTemplateArgumentListInfo*
getTemplateSpecializationArgsAsWritten() const;
/// Specify that this function declaration is actually a function
/// template specialization.
///
/// \param Template the function template that this function template
/// specialization specializes.
///
/// \param TemplateArgs the template arguments that produced this
/// function template specialization from the template.
///
/// \param InsertPos If non-NULL, the position in the function template
/// specialization set where the function template specialization data will
/// be inserted.
///
/// \param TSK the kind of template specialization this is.
///
/// \param TemplateArgsAsWritten location info of template arguments.
///
/// \param PointOfInstantiation point at which the function template
/// specialization was first instantiated.
void setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
TemplateSpecializationKind TSK = TSK_ImplicitInstantiation,
const TemplateArgumentListInfo *TemplateArgsAsWritten = nullptr,
SourceLocation PointOfInstantiation = SourceLocation()) {
setFunctionTemplateSpecialization(getASTContext(), Template, TemplateArgs,
InsertPos, TSK, TemplateArgsAsWritten,
PointOfInstantiation);
}
/// Specifies that this function declaration is actually a
/// dependent function template specialization.
void setDependentTemplateSpecialization(ASTContext &Context,
const UnresolvedSetImpl &Templates,
const TemplateArgumentListInfo &TemplateArgs);
DependentFunctionTemplateSpecializationInfo *
getDependentSpecializationInfo() const;
/// Determine what kind of template instantiation this function
/// represents.
TemplateSpecializationKind getTemplateSpecializationKind() const;
/// Specify what kind of template instantiation this function
/// represents.
void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation = SourceLocation());
/// Retrieve the (first) point of instantiation of a function template
/// specialization or a member of a class template specialization.
///
/// \returns the first point of instantiation, if this function was
/// instantiated from a template; otherwise, returns an invalid source
/// location.
SourceLocation getPointOfInstantiation() const;
/// Determine whether this is or was instantiated from an out-of-line
/// definition of a member function.
bool isOutOfLine() const override;
/// Identify a memory copying or setting function.
/// If the given function is a memory copy or setting function, returns
/// the corresponding Builtin ID. If the function is not a memory function,
/// returns 0.
unsigned getMemoryFunctionKind() const;
/// Returns the ODRHash of the function. This value is calculated and
/// stored on the first call; the stored value is returned on subsequent calls.
unsigned getODRHash();
/// Returns cached ODRHash of the function. This must have been previously
/// computed and stored.
unsigned getODRHash() const;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
return K >= firstFunction && K <= lastFunction;
}
static DeclContext *castToDeclContext(const FunctionDecl *D) {
return static_cast<DeclContext *>(const_cast<FunctionDecl*>(D));
}
static FunctionDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<FunctionDecl *>(const_cast<DeclContext*>(DC));
}
};
/// Represents a member of a struct/union/class.
class FieldDecl : public DeclaratorDecl, public Mergeable<FieldDecl> {
unsigned BitField : 1;
unsigned Mutable : 1;
mutable unsigned CachedFieldIndex : 30;
/// The kinds of value we can store in InitializerOrBitWidth.
///
/// Note that this is compatible with InClassInitStyle except for
/// ISK_CapturedVLAType.
enum InitStorageKind {
/// If the pointer is null, there's nothing special. Otherwise,
/// this is a bitfield and the pointer is the Expr* storing the
/// bit-width.
ISK_NoInit = (unsigned) ICIS_NoInit,
/// The pointer is an (optional due to delayed parsing) Expr*
/// holding the copy-initializer.
ISK_InClassCopyInit = (unsigned) ICIS_CopyInit,
/// The pointer is an (optional due to delayed parsing) Expr*
/// holding the list-initializer.
ISK_InClassListInit = (unsigned) ICIS_ListInit,
/// The pointer is a VariableArrayType* that's been captured;
/// the enclosing context is a lambda or captured statement.
ISK_CapturedVLAType,
};
/// If this is a bitfield with a default member initializer, this
/// structure is used to represent the two expressions.
struct InitAndBitWidth {
Expr *Init;
Expr *BitWidth;
};
/// Storage for either the bit-width, the in-class initializer, or
/// both (via InitAndBitWidth), or the captured variable length array bound.
///
/// If the storage kind is ISK_InClassCopyInit or
/// ISK_InClassListInit, but the initializer is null, then this
/// field has an in-class initializer that has not yet been parsed
/// and attached.
// FIXME: Tail-allocate this to reduce the size of FieldDecl in the
// overwhelmingly common case that we have none of these things.
llvm::PointerIntPair<void *, 2, InitStorageKind> InitStorage;
protected:
FieldDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
InClassInitStyle InitStyle)
: DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
BitField(false), Mutable(Mutable), CachedFieldIndex(0),
InitStorage(nullptr, (InitStorageKind) InitStyle) {
if (BW)
setBitWidth(BW);
}
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
static FieldDecl *Create(const ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, QualType T,
TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
InClassInitStyle InitStyle);
static FieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// Returns the index of this field within its record,
/// as appropriate for passing to ASTRecordLayout::getFieldOffset.
unsigned getFieldIndex() const;
/// Determines whether this field is mutable (C++ only).
bool isMutable() const { return Mutable; }
/// Determines whether this field is a bitfield.
bool isBitField() const { return BitField; }
/// Determines whether this is an unnamed bitfield.
bool isUnnamedBitfield() const { return isBitField() && !getDeclName(); }
/// Determines whether this field is a
/// representative for an anonymous struct or union. Such fields are
/// unnamed and are implicitly generated by the implementation to
/// store the data for the anonymous union or struct.
bool isAnonymousStructOrUnion() const;
Expr *getBitWidth() const {
if (!BitField)
return nullptr;
void *Ptr = InitStorage.getPointer();
if (getInClassInitStyle())
return static_cast<InitAndBitWidth*>(Ptr)->BitWidth;
return static_cast<Expr*>(Ptr);
}
unsigned getBitWidthValue(const ASTContext &Ctx) const;
/// Set the bit-field width for this member.
// Note: used by some clients, so do not remove it.
void setBitWidth(Expr *Width) {
assert(!hasCapturedVLAType() && !BitField &&
"bit width or captured type already set");
assert(Width && "no bit width specified");
InitStorage.setPointer(
InitStorage.getInt()
? new (getASTContext())
InitAndBitWidth{getInClassInitializer(), Width}
: static_cast<void*>(Width));
BitField = true;
}
/// Remove the bit-field width from this member.
// Note: used by some clients, so do not remove it.
void removeBitWidth() {
assert(isBitField() && "no bitfield width to remove");
InitStorage.setPointer(getInClassInitializer());
BitField = false;
}
/// Is this a zero-length bit-field? Such bit-fields aren't really bit-fields
/// at all and instead act as a separator between contiguous runs of other
/// bit-fields.
bool isZeroLengthBitField(const ASTContext &Ctx) const;
/// Get the kind of (C++11) default member initializer that this field has.
InClassInitStyle getInClassInitStyle() const {
InitStorageKind storageKind = InitStorage.getInt();
return (storageKind == ISK_CapturedVLAType
? ICIS_NoInit : (InClassInitStyle) storageKind);
}
/// Determine whether this member has a C++11 default member initializer.
bool hasInClassInitializer() const {
return getInClassInitStyle() != ICIS_NoInit;
}
/// Get the C++11 default member initializer for this member, or null if one
/// has not been set. If a valid declaration has a default member initializer,
/// but this returns null, then we have not parsed and attached it yet.
Expr *getInClassInitializer() const {
if (!hasInClassInitializer())
return nullptr;
void *Ptr = InitStorage.getPointer();
if (BitField)
return static_cast<InitAndBitWidth*>(Ptr)->Init;
return static_cast<Expr*>(Ptr);
}
/// Set the C++11 in-class initializer for this member.
void setInClassInitializer(Expr *Init) {
assert(hasInClassInitializer() && !getInClassInitializer());
if (BitField)
static_cast<InitAndBitWidth*>(InitStorage.getPointer())->Init = Init;
else
InitStorage.setPointer(Init);
}
/// Remove the C++11 in-class initializer from this member.
void removeInClassInitializer() {
assert(hasInClassInitializer() && "no initializer to remove");
InitStorage.setPointerAndInt(getBitWidth(), ISK_NoInit);
}
/// Determine whether this member captures the variable length array
/// type.
bool hasCapturedVLAType() const {
return InitStorage.getInt() == ISK_CapturedVLAType;
}
/// Get the captured variable length array type.
const VariableArrayType *getCapturedVLAType() const {
return hasCapturedVLAType() ? static_cast<const VariableArrayType *>(
InitStorage.getPointer())
: nullptr;
}
/// Set the captured variable length array type for this field.
void setCapturedVLAType(const VariableArrayType *VLAType);
/// Returns the parent of this field declaration, which
/// is the struct/union/class in which this field is defined.
const RecordDecl *getParent() const {
return cast<RecordDecl>(getDeclContext());
}
RecordDecl *getParent() {
return cast<RecordDecl>(getDeclContext());
}
SourceRange getSourceRange() const override LLVM_READONLY;
/// Retrieves the canonical declaration of this field.
FieldDecl *getCanonicalDecl() override { return getFirstDecl(); }
const FieldDecl *getCanonicalDecl() const { return getFirstDecl(); }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstField && K <= lastField; }
};
/// An instance of this object exists for each enum constant
/// that is defined. For example, in "enum X {a,b}", each of a and b is an
/// EnumConstantDecl, X is an instance of EnumDecl, and the type of a and b
/// is the TagType for the X EnumDecl.
class EnumConstantDecl : public ValueDecl, public Mergeable<EnumConstantDecl> {
Stmt *Init; // an integer constant expression
llvm::APSInt Val; // The value.
protected:
EnumConstantDecl(DeclContext *DC, SourceLocation L,
IdentifierInfo *Id, QualType T, Expr *E,
const llvm::APSInt &V)
: ValueDecl(EnumConstant, DC, L, Id, T), Init((Stmt*)E), Val(V) {}
public:
friend class StmtIteratorBase;
static EnumConstantDecl *Create(ASTContext &C, EnumDecl *DC,
SourceLocation L, IdentifierInfo *Id,
QualType T, Expr *E,
const llvm::APSInt &V);
static EnumConstantDecl *CreateDeserialized(ASTContext &C, unsigned ID);
const Expr *getInitExpr() const { return (const Expr*) Init; }
Expr *getInitExpr() { return (Expr*) Init; }
const llvm::APSInt &getInitVal() const { return Val; }
void setInitExpr(Expr *E) { Init = (Stmt*) E; }
void setInitVal(const llvm::APSInt &V) { Val = V; }
SourceRange getSourceRange() const override LLVM_READONLY;
/// Retrieves the canonical declaration of this enumerator.
EnumConstantDecl *getCanonicalDecl() override { return getFirstDecl(); }
const EnumConstantDecl *getCanonicalDecl() const { return getFirstDecl(); }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == EnumConstant; }
};
/// Represents a field injected from an anonymous union/struct into the parent
/// scope. These are always implicit.
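/// For illustration, a hedged sketch:
/// \code
///   struct S {
///     union { int X; float F; }; // S gains IndirectFieldDecls for X and F
///   };
/// \endcode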
class IndirectFieldDecl : public ValueDecl,
public Mergeable<IndirectFieldDecl> {
NamedDecl **Chaining;
unsigned ChainingSize;
IndirectFieldDecl(ASTContext &C, DeclContext *DC, SourceLocation L,
DeclarationName N, QualType T,
MutableArrayRef<NamedDecl *> CH);
void anchor() override;
public:
friend class ASTDeclReader;
static IndirectFieldDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id,
QualType T, llvm::MutableArrayRef<NamedDecl *> CH);
static IndirectFieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
using chain_iterator = ArrayRef<NamedDecl *>::const_iterator;
ArrayRef<NamedDecl *> chain() const {
return llvm::makeArrayRef(Chaining, ChainingSize);
}
chain_iterator chain_begin() const { return chain().begin(); }
chain_iterator chain_end() const { return chain().end(); }
unsigned getChainingSize() const { return ChainingSize; }
FieldDecl *getAnonField() const {
assert(chain().size() >= 2);
return cast<FieldDecl>(chain().back());
}
VarDecl *getVarDecl() const {
assert(chain().size() >= 2);
return dyn_cast<VarDecl>(chain().front());
}
IndirectFieldDecl *getCanonicalDecl() override { return getFirstDecl(); }
const IndirectFieldDecl *getCanonicalDecl() const { return getFirstDecl(); }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == IndirectField; }
};
/// Represents a declaration of a type.
class TypeDecl : public NamedDecl {
friend class ASTContext;
/// This indicates the Type object that represents
/// this TypeDecl. It is a cache maintained by
/// ASTContext::getTypedefType, ASTContext::getTagDeclType,
/// ASTContext::getTemplateTypeParmType, and TemplateTypeParmDecl.
mutable const Type *TypeForDecl = nullptr;
/// The start of the source range for this declaration.
SourceLocation LocStart;
void anchor() override;
protected:
TypeDecl(Kind DK, DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
SourceLocation StartL = SourceLocation())
: NamedDecl(DK, DC, L, Id), LocStart(StartL) {}
public:
// Low-level accessor. If you just want the type defined by this node,
// check out ASTContext::getTypeDeclType or one of
// ASTContext::getTypedefType, ASTContext::getRecordType, etc. if you
// already know the specific kind of node this is.
const Type *getTypeForDecl() const { return TypeForDecl; }
void setTypeForDecl(const Type *TD) { TypeForDecl = TD; }
SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); }
SourceLocation getBeginLoc() const LLVM_READONLY { return LocStart; }
void setLocStart(SourceLocation L) { LocStart = L; }
SourceRange getSourceRange() const override LLVM_READONLY {
if (LocStart.isValid())
return SourceRange(LocStart, getLocation());
else
return SourceRange(getLocation());
}
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstType && K <= lastType; }
};
/// Base class for declarations which introduce a typedef-name.
class TypedefNameDecl : public TypeDecl, public Redeclarable<TypedefNameDecl> {
struct alignas(8) ModedTInfo {
TypeSourceInfo *first;
QualType second;
};
/// If the int part is 0, we have not computed IsTransparentTag.
/// Otherwise, IsTransparentTag is (getInt() >> 1).
mutable llvm::PointerIntPair<
llvm::PointerUnion<TypeSourceInfo *, ModedTInfo *>, 2>
MaybeModedTInfo;
void anchor() override;
protected:
TypedefNameDecl(Kind DK, ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo)
: TypeDecl(DK, DC, IdLoc, Id, StartLoc), redeclarable_base(C),
MaybeModedTInfo(TInfo, 0) {}
using redeclarable_base = Redeclarable<TypedefNameDecl>;
TypedefNameDecl *getNextRedeclarationImpl() override {
return getNextRedeclaration();
}
TypedefNameDecl *getPreviousDeclImpl() override {
return getPreviousDecl();
}
TypedefNameDecl *getMostRecentDeclImpl() override {
return getMostRecentDecl();
}
public:
using redecl_range = redeclarable_base::redecl_range;
using redecl_iterator = redeclarable_base::redecl_iterator;
using redeclarable_base::redecls_begin;
using redeclarable_base::redecls_end;
using redeclarable_base::redecls;
using redeclarable_base::getPreviousDecl;
using redeclarable_base::getMostRecentDecl;
using redeclarable_base::isFirstDecl;
bool isModed() const {
return MaybeModedTInfo.getPointer().is<ModedTInfo *>();
}
TypeSourceInfo *getTypeSourceInfo() const {
return isModed() ? MaybeModedTInfo.getPointer().get<ModedTInfo *>()->first
: MaybeModedTInfo.getPointer().get<TypeSourceInfo *>();
}
QualType getUnderlyingType() const {
return isModed() ? MaybeModedTInfo.getPointer().get<ModedTInfo *>()->second
: MaybeModedTInfo.getPointer()
.get<TypeSourceInfo *>()
->getType();
}
void setTypeSourceInfo(TypeSourceInfo *newType) {
MaybeModedTInfo.setPointer(newType);
}
void setModedTypeSourceInfo(TypeSourceInfo *unmodedTSI, QualType modedTy) {
MaybeModedTInfo.setPointer(new (getASTContext(), 8)
ModedTInfo({unmodedTSI, modedTy}));
}
/// Retrieves the canonical declaration of this typedef-name.
TypedefNameDecl *getCanonicalDecl() override { return getFirstDecl(); }
const TypedefNameDecl *getCanonicalDecl() const { return getFirstDecl(); }
/// Retrieves the tag declaration for which this is the typedef name for
/// linkage purposes, if any.
///
/// \param AnyRedecl Look for the tag declaration in any redeclaration of
/// this typedef declaration.
TagDecl *getAnonDeclWithTypedefName(bool AnyRedecl = false) const;
/// Determines if this typedef shares a name and spelling location with its
/// underlying tag type, as is the case with the NS_ENUM macro.
bool isTransparentTag() const {
if (MaybeModedTInfo.getInt())
return MaybeModedTInfo.getInt() & 0x2;
return isTransparentTagSlow();
}
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
return K >= firstTypedefName && K <= lastTypedefName;
}
private:
bool isTransparentTagSlow() const;
};
/// Represents the declaration of a typedef-name via the 'typedef'
/// type specifier.
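/// For example:
/// \code
///   typedef unsigned long size_type;
/// \endcode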
class TypedefDecl : public TypedefNameDecl {
TypedefDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id, TypeSourceInfo *TInfo)
: TypedefNameDecl(Typedef, C, DC, StartLoc, IdLoc, Id, TInfo) {}
public:
static TypedefDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo);
static TypedefDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Typedef; }
};
/// Represents the declaration of a typedef-name via a C++11
/// alias-declaration.
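/// For example:
/// \code
///   using size_type = unsigned long;
/// \endcode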
class TypeAliasDecl : public TypedefNameDecl {
/// The template for which this is the pattern, if any.
TypeAliasTemplateDecl *Template;
TypeAliasDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id, TypeSourceInfo *TInfo)
: TypedefNameDecl(TypeAlias, C, DC, StartLoc, IdLoc, Id, TInfo),
Template(nullptr) {}
public:
static TypeAliasDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, TypeSourceInfo *TInfo);
static TypeAliasDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
TypeAliasTemplateDecl *getDescribedAliasTemplate() const { return Template; }
void setDescribedAliasTemplate(TypeAliasTemplateDecl *TAT) { Template = TAT; }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == TypeAlias; }
};
/// Represents the declaration of a struct/union/class/enum.
class TagDecl
: public TypeDecl, public DeclContext, public Redeclarable<TagDecl> {
public:
// This is really ugly.
using TagKind = TagTypeKind;
private:
// FIXME: This can be packed into the bitfields in Decl.
/// The TagKind enum.
unsigned TagDeclKind : 3;
/// True if this is a definition ("struct foo {};"), false if it is a
/// declaration ("struct foo;"). It is not considered a definition
/// until the definition has been fully processed.
unsigned IsCompleteDefinition : 1;
protected:
/// True if this is currently being defined.
unsigned IsBeingDefined : 1;
private:
/// True if this tag declaration is "embedded" (i.e., defined or declared
/// for the very first time) in the syntax of a declarator.
unsigned IsEmbeddedInDeclarator : 1;
/// True if this tag is free standing, e.g. "struct foo;".
unsigned IsFreeStanding : 1;
protected:
// These are used by (and only defined for) EnumDecl.
unsigned NumPositiveBits : 8;
unsigned NumNegativeBits : 8;
/// True if this tag declaration is a scoped enumeration. Only
/// possible in C++11 mode.
unsigned IsScoped : 1;
/// If this tag declaration is a scoped enum,
/// then this is true if the scoped enum was declared using the class
/// tag, false if it was declared with the struct tag. No meaning is
/// associated if this tag declaration is not a scoped enum.
unsigned IsScopedUsingClassTag : 1;
/// True if this is an enumeration with fixed underlying type. Only
/// possible in C++11, Microsoft extensions, or Objective-C mode.
unsigned IsFixed : 1;
/// Indicates whether it is possible for declarations of this kind
/// to have an out-of-date definition.
///
/// This option is only enabled when modules are enabled.
unsigned MayHaveOutOfDateDef : 1;
/// Whether the full definition of this type has been required by a use
/// somewhere in the TU.
unsigned IsCompleteDefinitionRequired : 1;
private:
SourceRange BraceRange;
// A struct representing syntactic qualifier info,
// to be used for the (uncommon) case of out-of-line declarations.
using ExtInfo = QualifierInfo;
/// If the (out-of-line) tag declaration name
/// is qualified, it points to the qualifier info (nns and range);
/// otherwise, if the tag declaration is anonymous and it is part of
/// a typedef or alias, it points to the TypedefNameDecl (used for mangling);
/// otherwise, if the tag declaration is anonymous and it is used as a
/// declaration specifier for variables, it points to the first VarDecl (used
/// for mangling);
/// otherwise, it is a null (TypedefNameDecl) pointer.
llvm::PointerUnion<TypedefNameDecl *, ExtInfo *> TypedefNameDeclOrQualifier;
bool hasExtInfo() const { return TypedefNameDeclOrQualifier.is<ExtInfo *>(); }
ExtInfo *getExtInfo() { return TypedefNameDeclOrQualifier.get<ExtInfo *>(); }
const ExtInfo *getExtInfo() const {
return TypedefNameDeclOrQualifier.get<ExtInfo *>();
}
protected:
TagDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
SourceLocation L, IdentifierInfo *Id, TagDecl *PrevDecl,
SourceLocation StartL)
: TypeDecl(DK, DC, L, Id, StartL), DeclContext(DK), redeclarable_base(C),
TagDeclKind(TK), IsCompleteDefinition(false), IsBeingDefined(false),
IsEmbeddedInDeclarator(false), IsFreeStanding(false),
IsCompleteDefinitionRequired(false),
TypedefNameDeclOrQualifier((TypedefNameDecl *)nullptr) {
assert((DK != Enum || TK == TTK_Enum) &&
"EnumDecl not matched with TTK_Enum");
setPreviousDecl(PrevDecl);
}
using redeclarable_base = Redeclarable<TagDecl>;
TagDecl *getNextRedeclarationImpl() override {
return getNextRedeclaration();
}
TagDecl *getPreviousDeclImpl() override {
return getPreviousDecl();
}
TagDecl *getMostRecentDeclImpl() override {
return getMostRecentDecl();
}
/// Completes the definition of this tag declaration.
///
/// This is a helper function for derived classes.
void completeDefinition();
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
using redecl_range = redeclarable_base::redecl_range;
using redecl_iterator = redeclarable_base::redecl_iterator;
using redeclarable_base::redecls_begin;
using redeclarable_base::redecls_end;
using redeclarable_base::redecls;
using redeclarable_base::getPreviousDecl;
using redeclarable_base::getMostRecentDecl;
using redeclarable_base::isFirstDecl;
SourceRange getBraceRange() const { return BraceRange; }
void setBraceRange(SourceRange R) { BraceRange = R; }
/// Return SourceLocation representing start of source
/// range ignoring outer template declarations.
SourceLocation getInnerLocStart() const { return getLocStart(); }
/// Return SourceLocation representing start of source
/// range taking into account any outer template declarations.
SourceLocation getOuterLocStart() const;
SourceRange getSourceRange() const override LLVM_READONLY;
TagDecl *getCanonicalDecl() override;
const TagDecl *getCanonicalDecl() const {
return const_cast<TagDecl*>(this)->getCanonicalDecl();
}
/// Return true if this declaration is a complete definition of the type.
/// Provided for consistency.
bool isThisDeclarationADefinition() const {
return isCompleteDefinition();
}
/// Return true if this decl has its body fully specified.
bool isCompleteDefinition() const {
return IsCompleteDefinition;
}
/// Return true if this decl is
/// required to be complete for some existing use.
bool isCompleteDefinitionRequired() const {
return IsCompleteDefinitionRequired;
}
/// Return true if this decl is currently being defined.
bool isBeingDefined() const {
return IsBeingDefined;
}
bool isEmbeddedInDeclarator() const {
return IsEmbeddedInDeclarator;
}
void setEmbeddedInDeclarator(bool isInDeclarator) {
IsEmbeddedInDeclarator = isInDeclarator;
}
bool isFreeStanding() const { return IsFreeStanding; }
void setFreeStanding(bool isFreeStanding = true) {
IsFreeStanding = isFreeStanding;
}
/// Whether this declaration declares a type that is
/// dependent, i.e., a type that somehow depends on template
/// parameters.
bool isDependentType() const { return isDependentContext(); }
/// Starts the definition of this tag declaration.
///
/// This method should be invoked at the beginning of the definition
/// of this tag declaration. It will set the tag type into a state
/// where it is in the process of being defined.
void startDefinition();
/// Returns the TagDecl that actually defines this
/// struct/union/class/enum. When determining whether or not a
/// struct/union/class/enum has a definition, one should use this
/// method as opposed to 'isCompleteDefinition'. 'isCompleteDefinition'
/// indicates whether a specific TagDecl is the defining declaration, not
/// whether or not the struct/union/class/enum type is defined.
/// This method returns NULL if there is no TagDecl that defines
/// the struct/union/class/enum.
TagDecl *getDefinition() const;
void setCompleteDefinition(bool V) { IsCompleteDefinition = V; }
void setCompleteDefinitionRequired(bool V = true) {
IsCompleteDefinitionRequired = V;
}
StringRef getKindName() const {
return TypeWithKeyword::getTagTypeKindName(getTagKind());
}
TagKind getTagKind() const {
return TagKind(TagDeclKind);
}
void setTagKind(TagKind TK) { TagDeclKind = TK; }
bool isStruct() const { return getTagKind() == TTK_Struct; }
bool isInterface() const { return getTagKind() == TTK_Interface; }
bool isClass() const { return getTagKind() == TTK_Class; }
bool isUnion() const { return getTagKind() == TTK_Union; }
bool isEnum() const { return getTagKind() == TTK_Enum; }
/// Is this tag type named, either directly or via being defined in
/// a typedef of this type?
///
/// C++11 [basic.link]p8:
/// A type is said to have linkage if and only if:
/// - it is a class or enumeration type that is named (or has a
/// name for linkage purposes) and the name has linkage; ...
/// C++11 [dcl.typedef]p9:
/// If the typedef declaration defines an unnamed class (or enum),
/// the first typedef-name declared by the declaration to be that
/// class type (or enum type) is used to denote the class type (or
/// enum type) for linkage purposes only.
///
/// C does not have an analogous rule, but the same concept is
/// nonetheless useful in some places.
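/// For illustration, a hedged sketch: the unnamed struct below has a name
/// for linkage purposes, namely T:
/// \code
///   typedef struct { int X; } T;
/// \endcode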
bool hasNameForLinkage() const {
return (getDeclName() || getTypedefNameForAnonDecl());
}
TypedefNameDecl *getTypedefNameForAnonDecl() const {
return hasExtInfo() ? nullptr
: TypedefNameDeclOrQualifier.get<TypedefNameDecl *>();
}
void setTypedefNameForAnonDecl(TypedefNameDecl *TDD);
/// Retrieve the nested-name-specifier that qualifies the name of this
/// declaration, if it was present in the source.
NestedNameSpecifier *getQualifier() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
: nullptr;
}
/// Retrieve the nested-name-specifier (with source-location
/// information) that qualifies the name of this declaration, if it was
/// present in the source.
NestedNameSpecifierLoc getQualifierLoc() const {
return hasExtInfo() ? getExtInfo()->QualifierLoc
: NestedNameSpecifierLoc();
}
void setQualifierInfo(NestedNameSpecifierLoc QualifierLoc);
unsigned getNumTemplateParameterLists() const {
return hasExtInfo() ? getExtInfo()->NumTemplParamLists : 0;
}
TemplateParameterList *getTemplateParameterList(unsigned i) const {
assert(i < getNumTemplateParameterLists());
return getExtInfo()->TemplParamLists[i];
}
void setTemplateParameterListsInfo(ASTContext &Context,
ArrayRef<TemplateParameterList *> TPLists);
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K >= firstTag && K <= lastTag; }
static DeclContext *castToDeclContext(const TagDecl *D) {
return static_cast<DeclContext *>(const_cast<TagDecl*>(D));
}
static TagDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<TagDecl *>(const_cast<DeclContext*>(DC));
}
};
/// Represents an enum. In C++11, enums can be forward-declared
/// with a fixed underlying type, and in C we allow them to be forward-declared
/// with no underlying type as an extension.
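/// For illustration, a hedged sketch:
/// \code
///   enum class E : short; // C++11: forward declaration, fixed underlying type
///   enum F;               // C: forward declaration with no underlying type
/// \endcode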
class EnumDecl : public TagDecl {
/// This represents the integer type that the enum corresponds
/// to for code generation purposes. Note that the enumerator constants may
/// have a different type than this does.
///
/// If the underlying integer type was explicitly stated in the source
/// code, this is a TypeSourceInfo* for that type. Otherwise this type
/// was automatically deduced somehow, and this is a Type*.
///
/// Normally if IsFixed(), this would contain a TypeSourceInfo*, but in
/// some cases it won't.
///
/// The underlying type of an enumeration never has any qualifiers, so
/// we can get away with just storing a raw Type*, and thus save an
/// extra pointer when TypeSourceInfo is needed.
llvm::PointerUnion<const Type *, TypeSourceInfo *> IntegerType;
/// The integer type that values of this type should
/// promote to. In C, enumerators are generally of an integer type
/// directly, but gcc-style large enumerators (and all enumerators
/// in C++) are of the enum type instead.
QualType PromotionType;
/// If this enumeration is an instantiation of a member enumeration
/// of a class template specialization, this is the member specialization
/// information.
MemberSpecializationInfo *SpecializationInfo = nullptr;
/// The ODRHash of this enum, computed once and then cached. HasODRHash
/// records whether the hash has been computed yet.
unsigned HasODRHash : 1;
unsigned ODRHash;
EnumDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id, EnumDecl *PrevDecl,
bool Scoped, bool ScopedUsingClassTag, bool Fixed)
: TagDecl(Enum, TTK_Enum, C, DC, IdLoc, Id, PrevDecl, StartLoc) {
assert(Scoped || !ScopedUsingClassTag);
IntegerType = (const Type *)nullptr;
NumNegativeBits = 0;
NumPositiveBits = 0;
IsScoped = Scoped;
IsScopedUsingClassTag = ScopedUsingClassTag;
IsFixed = Fixed;
HasODRHash = false;
ODRHash = 0;
}
void anchor() override;
void setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
TemplateSpecializationKind TSK);
public:
friend class ASTDeclReader;
EnumDecl *getCanonicalDecl() override {
return cast<EnumDecl>(TagDecl::getCanonicalDecl());
}
const EnumDecl *getCanonicalDecl() const {
return const_cast<EnumDecl*>(this)->getCanonicalDecl();
}
EnumDecl *getPreviousDecl() {
return cast_or_null<EnumDecl>(
static_cast<TagDecl *>(this)->getPreviousDecl());
}
const EnumDecl *getPreviousDecl() const {
return const_cast<EnumDecl*>(this)->getPreviousDecl();
}
EnumDecl *getMostRecentDecl() {
return cast<EnumDecl>(static_cast<TagDecl *>(this)->getMostRecentDecl());
}
const EnumDecl *getMostRecentDecl() const {
return const_cast<EnumDecl*>(this)->getMostRecentDecl();
}
EnumDecl *getDefinition() const {
return cast_or_null<EnumDecl>(TagDecl::getDefinition());
}
static EnumDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, EnumDecl *PrevDecl,
bool IsScoped, bool IsScopedUsingClassTag,
bool IsFixed);
static EnumDecl *CreateDeserialized(ASTContext &C, unsigned ID);
/// When created, the EnumDecl corresponds to a
/// forward-declared enum. This method is used to mark the
/// declaration as being defined; its enumerators have already been
/// added (via DeclContext::addDecl). NewType is the new underlying
/// type of the enumeration type.
void completeDefinition(QualType NewType,
QualType PromotionType,
unsigned NumPositiveBits,
unsigned NumNegativeBits);
// Iterates through the enumerators of this enumeration.
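// For illustration, a hedged sketch (ED is a hypothetical EnumDecl *):
//   for (const EnumConstantDecl *ECD : ED->enumerators())
//     llvm::errs() << ECD->getName() << " = " << ECD->getInitVal() << "\n";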
using enumerator_iterator = specific_decl_iterator<EnumConstantDecl>;
using enumerator_range =
llvm::iterator_range<specific_decl_iterator<EnumConstantDecl>>;
enumerator_range enumerators() const {
return enumerator_range(enumerator_begin(), enumerator_end());
}
enumerator_iterator enumerator_begin() const {
const EnumDecl *E = getDefinition();
if (!E)
E = this;
return enumerator_iterator(E->decls_begin());
}
enumerator_iterator enumerator_end() const {
const EnumDecl *E = getDefinition();
if (!E)
E = this;
return enumerator_iterator(E->decls_end());
}
/// Return the integer type that enumerators should promote to.
QualType getPromotionType() const { return PromotionType; }
/// Set the promotion type.
void setPromotionType(QualType T) { PromotionType = T; }
/// Return the integer type this enum decl corresponds to.
/// This returns a null QualType for an enum forward declaration with no fixed
/// underlying type.
QualType getIntegerType() const {
if (!IntegerType)
return QualType();
if (const Type *T = IntegerType.dyn_cast<const Type*>())
return QualType(T, 0);
return IntegerType.get<TypeSourceInfo*>()->getType().getUnqualifiedType();
}
/// Set the underlying integer type.
void setIntegerType(QualType T) { IntegerType = T.getTypePtrOrNull(); }
/// Set the underlying integer type source info.
void setIntegerTypeSourceInfo(TypeSourceInfo *TInfo) { IntegerType = TInfo; }
/// Return the type source info for the underlying integer type;
/// returns null if no type source info exists.
TypeSourceInfo *getIntegerTypeSourceInfo() const {
return IntegerType.dyn_cast<TypeSourceInfo*>();
}
/// Retrieve the source range that covers the underlying type if
/// specified.
SourceRange getIntegerTypeRange() const LLVM_READONLY;
/// Returns the width in bits required to store all the
/// non-negative enumerators of this enum.
unsigned getNumPositiveBits() const {
return NumPositiveBits;
}
void setNumPositiveBits(unsigned Num) {
NumPositiveBits = Num;
assert(NumPositiveBits == Num && "can't store this bitcount");
}
/// Returns the width in bits required to store all the
/// negative enumerators of this enum. These widths include
/// the rightmost leading 1; that is:
///
///   MOST NEGATIVE ENUMERATOR     PATTERN     NUM NEGATIVE BITS
///   ------------------------     -------     -----------------
///   -1                           1111111     1
///   -10                          1110110     5
///   -101                         1001011     8
unsigned getNumNegativeBits() const {
return NumNegativeBits;
}
void setNumNegativeBits(unsigned Num) {
NumNegativeBits = Num;
}
/// Returns true if this is a C++11 scoped enumeration.
bool isScoped() const {
return IsScoped;
}
/// Returns true if this scoped enumeration was declared using the 'class'
/// tag rather than the 'struct' tag.
bool isScopedUsingClassTag() const {
return IsScopedUsingClassTag;
}
/// Returns true if this is an Objective-C, C++11, or
/// Microsoft-style enumeration with a fixed underlying type.
bool isFixed() const {
return IsFixed;
}
/// Returns the ODRHash of the enum, computed and cached on the first call.
unsigned getODRHash();
/// Returns true if this can be considered a complete type.
bool isComplete() const {
// IntegerType is set for fixed type enums and non-fixed but implicitly
// int-sized Microsoft enums.
return isCompleteDefinition() || IntegerType;
}
/// Returns true if this enum is either annotated with
/// enum_extensibility(closed) or isn't annotated with enum_extensibility.
bool isClosed() const;
/// Returns true if this enum is annotated with flag_enum and isn't annotated
/// with enum_extensibility(open).
bool isClosedFlag() const;
/// Returns true if this enum is annotated with neither flag_enum nor
/// enum_extensibility(open).
bool isClosedNonFlag() const;
/// Retrieve the enum definition from which this enumeration could
/// be instantiated, if it is an instantiation (rather than a non-template).
EnumDecl *getTemplateInstantiationPattern() const;
/// Returns the enumeration (declared within the template)
/// from which this enumeration type was instantiated, or NULL if
/// this enumeration was not instantiated from any template.
EnumDecl *getInstantiatedFromMemberEnum() const;
/// If this enumeration is a member of a specialization of a
/// templated class, determine what kind of template specialization
/// or instantiation this is.
TemplateSpecializationKind getTemplateSpecializationKind() const;
/// For an enumeration member that was instantiated from a member
/// enumeration of a templated class, set the template specialization kind.
void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation = SourceLocation());
/// If this enumeration is an instantiation of a member enumeration of
/// a class template specialization, retrieves the member specialization
/// information.
MemberSpecializationInfo *getMemberSpecializationInfo() const {
return SpecializationInfo;
}
/// Specify that this enumeration is an instantiation of the
/// member enumeration ED.
void setInstantiationOfMemberEnum(EnumDecl *ED,
TemplateSpecializationKind TSK) {
setInstantiationOfMemberEnum(getASTContext(), ED, TSK);
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Enum; }
};
/// Represents a struct/union/class. For example:
/// struct X; // Forward declaration, no "body".
/// union Y { int A, B; }; // Has body with members A and B (FieldDecls).
/// This decl will be marked invalid if *any* members are invalid.
class RecordDecl : public TagDecl {
public:
/// Enum that represents the different ways arguments are passed to and
/// returned from function calls. This takes into account the target-specific
/// and version-specific rules along with the rules determined by the
/// language.
enum ArgPassingKind : unsigned {
/// The argument of this type can be passed directly in registers.
APK_CanPassInRegs,
/// The argument of this type cannot be passed directly in registers.
/// Records containing this type as a subobject are not forced to be passed
/// indirectly. This value is used only in C++. This value is required by
/// C++ because, in uncommon situations, it is possible for a class to have
/// only trivial copy/move constructors even when one of its subobjects has
/// a non-trivial copy/move constructor (if e.g. the corresponding copy/move
/// constructor in the derived class is deleted).
APK_CannotPassInRegs,
/// The argument of this type cannot be passed directly in registers.
/// Records containing this type as a subobject are forced to be passed
/// indirectly.
APK_CanNeverPassInRegs
};
private:
friend class DeclContext;
// FIXME: This can be packed into the bitfields in Decl.
/// This is true if this struct ends with a flexible
/// array member (e.g. int X[]) or if this union contains a struct that does.
/// If so, this cannot be contained in arrays or other structs as a member.
unsigned HasFlexibleArrayMember : 1;
/// Whether this is the type of an anonymous struct or union.
unsigned AnonymousStructOrUnion : 1;
/// This is true if this struct has at least one member
/// containing an Objective-C object pointer type.
unsigned HasObjectMember : 1;
/// This is true if struct has at least one member of
/// 'volatile' type.
unsigned HasVolatileMember : 1;
/// Whether the field declarations of this record have been loaded
/// from external storage. To avoid unnecessary deserialization of
/// methods/nested types we allow deserialization of just the fields
/// when needed.
mutable unsigned LoadedFieldsFromExternalStorage : 1;
/// Basic properties of non-trivial C structs.
unsigned NonTrivialToPrimitiveDefaultInitialize : 1;
unsigned NonTrivialToPrimitiveCopy : 1;
unsigned NonTrivialToPrimitiveDestroy : 1;
/// Indicates whether this struct is destroyed in the callee.
///
/// Please note that MSVC won't merge adjacent bitfields if they don't have
/// the same type.
unsigned ParamDestroyedInCallee : 1;
/// Represents the way this type is passed to a function.
unsigned ArgPassingRestrictions : 2;
protected:
RecordDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, RecordDecl *PrevDecl);
public:
static RecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
IdentifierInfo *Id, RecordDecl* PrevDecl = nullptr);
static RecordDecl *CreateDeserialized(const ASTContext &C, unsigned ID);
RecordDecl *getPreviousDecl() {
return cast_or_null<RecordDecl>(
static_cast<TagDecl *>(this)->getPreviousDecl());
}
const RecordDecl *getPreviousDecl() const {
return const_cast<RecordDecl*>(this)->getPreviousDecl();
}
RecordDecl *getMostRecentDecl() {
return cast<RecordDecl>(static_cast<TagDecl *>(this)->getMostRecentDecl());
}
const RecordDecl *getMostRecentDecl() const {
return const_cast<RecordDecl*>(this)->getMostRecentDecl();
}
bool hasFlexibleArrayMember() const { return HasFlexibleArrayMember; }
void setHasFlexibleArrayMember(bool V) { HasFlexibleArrayMember = V; }
/// Whether this is an anonymous struct or union. To be an anonymous
/// struct or union, it must have been declared without a name and
/// there must be no objects of this type declared, e.g.,
/// @code
/// union { int i; float f; };
/// @endcode
/// is an anonymous union but neither of the following are:
/// @code
/// union X { int i; float f; };
/// union { int i; float f; } obj;
/// @endcode
bool isAnonymousStructOrUnion() const { return AnonymousStructOrUnion; }
void setAnonymousStructOrUnion(bool Anon) {
AnonymousStructOrUnion = Anon;
}
bool hasObjectMember() const { return HasObjectMember; }
void setHasObjectMember (bool val) { HasObjectMember = val; }
bool hasVolatileMember() const { return HasVolatileMember; }
void setHasVolatileMember (bool val) { HasVolatileMember = val; }
bool hasLoadedFieldsFromExternalStorage() const {
return LoadedFieldsFromExternalStorage;
}
void setHasLoadedFieldsFromExternalStorage(bool val) {
LoadedFieldsFromExternalStorage = val;
}
/// Functions to query basic properties of non-trivial C structs.
bool isNonTrivialToPrimitiveDefaultInitialize() const {
return NonTrivialToPrimitiveDefaultInitialize;
}
void setNonTrivialToPrimitiveDefaultInitialize(bool V) {
NonTrivialToPrimitiveDefaultInitialize = V;
}
bool isNonTrivialToPrimitiveCopy() const {
return NonTrivialToPrimitiveCopy;
}
void setNonTrivialToPrimitiveCopy(bool V) {
NonTrivialToPrimitiveCopy = V;
}
bool isNonTrivialToPrimitiveDestroy() const {
return NonTrivialToPrimitiveDestroy;
}
void setNonTrivialToPrimitiveDestroy(bool V) {
NonTrivialToPrimitiveDestroy = V;
}
/// Determine whether this class can be passed in registers. In C++ mode,
/// it must have at least one trivial, non-deleted copy or move constructor.
/// FIXME: This should be set as part of completeDefinition.
bool canPassInRegisters() const {
return getArgPassingRestrictions() == APK_CanPassInRegs;
}
ArgPassingKind getArgPassingRestrictions() const {
return static_cast<ArgPassingKind>(ArgPassingRestrictions);
}
void setArgPassingRestrictions(ArgPassingKind Kind) {
ArgPassingRestrictions = static_cast<uint8_t>(Kind);
}
bool isParamDestroyedInCallee() const {
return ParamDestroyedInCallee;
}
void setParamDestroyedInCallee(bool V) {
ParamDestroyedInCallee = V;
}
/// Determines whether this declaration represents the
/// injected class name.
///
/// The injected class name in C++ is the name of the class that
/// appears inside the class itself. For example:
///
/// \code
/// struct C {
/// // C is implicitly declared here as a synonym for the class name.
/// };
///
/// C::C c; // same as "C c;"
/// \endcode
bool isInjectedClassName() const;
/// Determine whether this record is a class describing a lambda
/// function object.
bool isLambda() const;
/// Determine whether this record is a record for captured variables in a
/// CapturedStmt construct.
bool isCapturedRecord() const;
/// Mark the record as a record for captured variables in a CapturedStmt
/// construct.
void setCapturedRecord();
/// Returns the RecordDecl that actually defines
/// this struct/union/class. When determining whether or not a
/// struct/union/class is completely defined, one should use this
/// method as opposed to 'isCompleteDefinition'.
/// 'isCompleteDefinition' indicates whether or not a specific
/// RecordDecl is a completed definition, not whether or not the
/// record type is defined. This method returns NULL if there is
/// no RecordDecl that defines the struct/union/class.
RecordDecl *getDefinition() const {
return cast_or_null<RecordDecl>(TagDecl::getDefinition());
}
// Iterator access to field members. The field iterator only visits
// the non-static data members of this class, ignoring any static
// data members, functions, constructors, destructors, etc.
using field_iterator = specific_decl_iterator<FieldDecl>;
using field_range = llvm::iterator_range<specific_decl_iterator<FieldDecl>>;
field_range fields() const { return field_range(field_begin(), field_end()); }
field_iterator field_begin() const;
field_iterator field_end() const {
return field_iterator(decl_iterator());
}
// Whether there are any fields (non-static data members) in this record.
bool field_empty() const {
return field_begin() == field_end();
}
/// Note that the definition of this type is now complete.
virtual void completeDefinition();
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) {
return K >= firstRecord && K <= lastRecord;
}
/// Get whether or not this is an ms_struct which can
/// be turned on with an attribute, pragma, or the -mms-bitfields
/// command-line option.
bool isMsStruct(const ASTContext &C) const;
/// Whether we are allowed to insert extra padding between fields.
/// Such padding is added to help AddressSanitizer detect
/// intra-object-overflow bugs.
bool mayInsertExtraPadding(bool EmitRemark = false) const;
/// Finds the first data member which has a name.
/// nullptr is returned if no named data member exists.
const FieldDecl *findFirstNamedDataMember() const;
private:
/// Deserialize just the fields.
void LoadFieldsFromExternalStorage() const;
};
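/// Represents a file-scope inline assembly declaration (a GNU extension),
/// e.g.:
/// \code
///   __asm__(".globl _foo");
/// \endcode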
class FileScopeAsmDecl : public Decl {
StringLiteral *AsmString;
SourceLocation RParenLoc;
FileScopeAsmDecl(DeclContext *DC, StringLiteral *asmstring,
SourceLocation StartL, SourceLocation EndL)
: Decl(FileScopeAsm, DC, StartL), AsmString(asmstring), RParenLoc(EndL) {}
virtual void anchor();
public:
static FileScopeAsmDecl *Create(ASTContext &C, DeclContext *DC,
StringLiteral *Str, SourceLocation AsmLoc,
SourceLocation RParenLoc);
static FileScopeAsmDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceLocation getAsmLoc() const { return getLocation(); }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceRange getSourceRange() const override LLVM_READONLY {
return SourceRange(getAsmLoc(), getRParenLoc());
}
const StringLiteral *getAsmString() const { return AsmString; }
StringLiteral *getAsmString() { return AsmString; }
void setAsmString(StringLiteral *Asm) { AsmString = Asm; }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == FileScopeAsm; }
};
/// Represents a block literal declaration, which is like an
/// unnamed FunctionDecl. For example:
/// ^{ statement-body } or ^(int arg1, float arg2){ statement-body }
class BlockDecl : public Decl, public DeclContext {
public:
/// A class which contains all the information about a particular
/// captured value.
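/// For illustration, a hedged sketch:
/// \code
///   __block int X = 0;
///   int Y = 1;
///   ^{ return X + Y; }; // X: isByRef() capture; Y: by-copy capture
/// \endcode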
class Capture {
enum {
flag_isByRef = 0x1,
flag_isNested = 0x2
};
/// The variable being captured.
llvm::PointerIntPair<VarDecl*, 2> VariableAndFlags;
/// The copy expression, expressed in terms of a DeclRef (or
/// BlockDeclRef) to the captured variable. Only required if the
/// variable has a C++ class type.
Expr *CopyExpr;
public:
Capture(VarDecl *variable, bool byRef, bool nested, Expr *copy)
: VariableAndFlags(variable,
(byRef ? flag_isByRef : 0) | (nested ? flag_isNested : 0)),
CopyExpr(copy) {}
/// The variable being captured.
VarDecl *getVariable() const { return VariableAndFlags.getPointer(); }
/// Whether this is a "by ref" capture, i.e. a capture of a __block
/// variable.
bool isByRef() const { return VariableAndFlags.getInt() & flag_isByRef; }
/// Whether this is a nested capture, i.e. the variable captured
/// is not from outside the immediately enclosing function/block.
bool isNested() const { return VariableAndFlags.getInt() & flag_isNested; }
bool hasCopyExpr() const { return CopyExpr != nullptr; }
Expr *getCopyExpr() const { return CopyExpr; }
void setCopyExpr(Expr *e) { CopyExpr = e; }
};
private:
// FIXME: This can be packed into the bitfields in Decl.
bool IsVariadic : 1;
bool CapturesCXXThis : 1;
bool BlockMissingReturnType : 1;
bool IsConversionFromLambda : 1;
/// A bit that indicates this block is passed directly to a function as a
/// non-escaping parameter.
bool DoesNotEscape : 1;
/// A new[]'d array of pointers to ParmVarDecls for the formal
/// parameters of this function. This is null if there is no prototype or
/// if there are no formal parameters.
ParmVarDecl **ParamInfo = nullptr;
unsigned NumParams = 0;
Stmt *Body = nullptr;
TypeSourceInfo *SignatureAsWritten = nullptr;
const Capture *Captures = nullptr;
unsigned NumCaptures = 0;
unsigned ManglingNumber = 0;
Decl *ManglingContextDecl = nullptr;
protected:
BlockDecl(DeclContext *DC, SourceLocation CaretLoc)
: Decl(Block, DC, CaretLoc), DeclContext(Block), IsVariadic(false),
CapturesCXXThis(false), BlockMissingReturnType(true),
IsConversionFromLambda(false), DoesNotEscape(false) {}
public:
static BlockDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L);
static BlockDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceLocation getCaretLocation() const { return getLocation(); }
bool isVariadic() const { return IsVariadic; }
void setIsVariadic(bool value) { IsVariadic = value; }
CompoundStmt *getCompoundBody() const { return (CompoundStmt*) Body; }
Stmt *getBody() const override { return (Stmt*) Body; }
void setBody(CompoundStmt *B) { Body = (Stmt*) B; }
void setSignatureAsWritten(TypeSourceInfo *Sig) { SignatureAsWritten = Sig; }
TypeSourceInfo *getSignatureAsWritten() const { return SignatureAsWritten; }
// ArrayRef access to formal parameters.
ArrayRef<ParmVarDecl *> parameters() const {
return {ParamInfo, getNumParams()};
}
MutableArrayRef<ParmVarDecl *> parameters() {
return {ParamInfo, getNumParams()};
}
// Iterator access to formal parameters.
using param_iterator = MutableArrayRef<ParmVarDecl *>::iterator;
using param_const_iterator = ArrayRef<ParmVarDecl *>::const_iterator;
bool param_empty() const { return parameters().empty(); }
param_iterator param_begin() { return parameters().begin(); }
param_iterator param_end() { return parameters().end(); }
param_const_iterator param_begin() const { return parameters().begin(); }
param_const_iterator param_end() const { return parameters().end(); }
size_t param_size() const { return parameters().size(); }
unsigned getNumParams() const { return NumParams; }
const ParmVarDecl *getParamDecl(unsigned i) const {
assert(i < getNumParams() && "Illegal param #");
return ParamInfo[i];
}
ParmVarDecl *getParamDecl(unsigned i) {
assert(i < getNumParams() && "Illegal param #");
return ParamInfo[i];
}
void setParams(ArrayRef<ParmVarDecl *> NewParamInfo);
/// True if this block (or its nested blocks) captures
/// anything of local storage from its enclosing scopes.
bool hasCaptures() const { return NumCaptures != 0 || CapturesCXXThis; }
/// Returns the number of captured variables.
/// Does not include an entry for 'this'.
unsigned getNumCaptures() const { return NumCaptures; }
using capture_const_iterator = ArrayRef<Capture>::const_iterator;
ArrayRef<Capture> captures() const { return {Captures, NumCaptures}; }
capture_const_iterator capture_begin() const { return captures().begin(); }
capture_const_iterator capture_end() const { return captures().end(); }
bool capturesCXXThis() const { return CapturesCXXThis; }
bool blockMissingReturnType() const { return BlockMissingReturnType; }
void setBlockMissingReturnType(bool val) { BlockMissingReturnType = val; }
bool isConversionFromLambda() const { return IsConversionFromLambda; }
void setIsConversionFromLambda(bool val) { IsConversionFromLambda = val; }
bool doesNotEscape() const { return DoesNotEscape; }
void setDoesNotEscape() { DoesNotEscape = true; }
bool capturesVariable(const VarDecl *var) const;
void setCaptures(ASTContext &Context, ArrayRef<Capture> Captures,
bool CapturesCXXThis);
unsigned getBlockManglingNumber() const {
return ManglingNumber;
}
Decl *getBlockManglingContextDecl() const {
return ManglingContextDecl;
}
void setBlockMangling(unsigned Number, Decl *Ctx) {
ManglingNumber = Number;
ManglingContextDecl = Ctx;
}
SourceRange getSourceRange() const override LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Block; }
static DeclContext *castToDeclContext(const BlockDecl *D) {
return static_cast<DeclContext *>(const_cast<BlockDecl*>(D));
}
static BlockDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<BlockDecl *>(const_cast<DeclContext*>(DC));
}
};
/// Represents the body of a CapturedStmt, and serves as its DeclContext.
class CapturedDecl final
: public Decl,
public DeclContext,
private llvm::TrailingObjects<CapturedDecl, ImplicitParamDecl *> {
protected:
size_t numTrailingObjects(OverloadToken<ImplicitParamDecl>) {
return NumParams;
}
private:
/// The number of parameters to the outlined function.
unsigned NumParams;
/// The position of context parameter in list of parameters.
unsigned ContextParam;
/// The body of the outlined function.
llvm::PointerIntPair<Stmt *, 1, bool> BodyAndNothrow;
explicit CapturedDecl(DeclContext *DC, unsigned NumParams);
ImplicitParamDecl *const *getParams() const {
return getTrailingObjects<ImplicitParamDecl *>();
}
ImplicitParamDecl **getParams() {
return getTrailingObjects<ImplicitParamDecl *>();
}
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
friend TrailingObjects;
static CapturedDecl *Create(ASTContext &C, DeclContext *DC,
unsigned NumParams);
static CapturedDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumParams);
Stmt *getBody() const override;
void setBody(Stmt *B);
bool isNothrow() const;
void setNothrow(bool Nothrow = true);
unsigned getNumParams() const { return NumParams; }
ImplicitParamDecl *getParam(unsigned i) const {
assert(i < NumParams);
return getParams()[i];
}
void setParam(unsigned i, ImplicitParamDecl *P) {
assert(i < NumParams);
getParams()[i] = P;
}
// ArrayRef interface to parameters.
ArrayRef<ImplicitParamDecl *> parameters() const {
return {getParams(), getNumParams()};
}
MutableArrayRef<ImplicitParamDecl *> parameters() {
return {getParams(), getNumParams()};
}
/// Retrieve the parameter containing captured variables.
ImplicitParamDecl *getContextParam() const {
assert(ContextParam < NumParams);
return getParam(ContextParam);
}
void setContextParam(unsigned i, ImplicitParamDecl *P) {
assert(i < NumParams);
ContextParam = i;
setParam(i, P);
}
unsigned getContextParamPosition() const { return ContextParam; }
using param_iterator = ImplicitParamDecl *const *;
using param_range = llvm::iterator_range<param_iterator>;
/// Retrieve an iterator pointing to the first parameter decl.
param_iterator param_begin() const { return getParams(); }
/// Retrieve an iterator one past the last parameter decl.
param_iterator param_end() const { return getParams() + NumParams; }
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Captured; }
static DeclContext *castToDeclContext(const CapturedDecl *D) {
return static_cast<DeclContext *>(const_cast<CapturedDecl *>(D));
}
static CapturedDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<CapturedDecl *>(const_cast<DeclContext *>(DC));
}
};
/// Describes a module import declaration, which makes the contents
/// of the named module visible in the current translation unit.
///
/// An import declaration imports the named module (or submodule). For example:
/// \code
/// @import std.vector;
/// \endcode
///
/// Import declarations can also be implicitly generated from
/// \#include/\#import directives.
class ImportDecl final : public Decl,
llvm::TrailingObjects<ImportDecl, SourceLocation> {
friend class ASTContext;
friend class ASTDeclReader;
friend class ASTReader;
friend TrailingObjects;
/// The imported module, along with a bit that indicates whether
/// we have source-location information for each identifier in the module
/// name.
///
/// When the bit is false, we only have a single source location for the
/// end of the import declaration.
llvm::PointerIntPair<Module *, 1, bool> ImportedAndComplete;
/// The next import in the list of imports local to the translation
/// unit being parsed (not loaded from an AST file).
ImportDecl *NextLocalImport = nullptr;
ImportDecl(DeclContext *DC, SourceLocation StartLoc, Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs);
ImportDecl(DeclContext *DC, SourceLocation StartLoc, Module *Imported,
SourceLocation EndLoc);
ImportDecl(EmptyShell Empty) : Decl(Import, Empty) {}
public:
/// Create a new module import declaration.
static ImportDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, Module *Imported,
ArrayRef<SourceLocation> IdentifierLocs);
/// Create a new module import declaration for an implicitly-generated
/// import.
static ImportDecl *CreateImplicit(ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, Module *Imported,
SourceLocation EndLoc);
/// Create a new, deserialized module import declaration.
static ImportDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NumLocations);
/// Retrieve the module that was imported by the import declaration.
Module *getImportedModule() const { return ImportedAndComplete.getPointer(); }
/// Retrieves the locations of each of the identifiers that make up
/// the complete module name in the import declaration.
///
/// This will return an empty array if the locations of the individual
/// identifiers aren't available.
ArrayRef<SourceLocation> getIdentifierLocs() const;
SourceRange getSourceRange() const override LLVM_READONLY;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Import; }
};
/// Represents a C++ Modules TS module export declaration.
///
/// For example:
/// \code
/// export void foo();
/// \endcode
class ExportDecl final : public Decl, public DeclContext {
virtual void anchor();
private:
friend class ASTDeclReader;
/// The source location for the right brace (if valid).
SourceLocation RBraceLoc;
ExportDecl(DeclContext *DC, SourceLocation ExportLoc)
: Decl(Export, DC, ExportLoc), DeclContext(Export),
RBraceLoc(SourceLocation()) {}
public:
static ExportDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation ExportLoc);
static ExportDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceLocation getExportLoc() const { return getLocation(); }
SourceLocation getRBraceLoc() const { return RBraceLoc; }
void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (RBraceLoc.isValid())
return RBraceLoc;
// No braces: get the end location of the (only) declaration in context
// (if present).
return decls_empty() ? getLocation() : decls_begin()->getLocEnd();
}
SourceRange getSourceRange() const override LLVM_READONLY {
return SourceRange(getLocation(), getLocEnd());
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Export; }
static DeclContext *castToDeclContext(const ExportDecl *D) {
return static_cast<DeclContext *>(const_cast<ExportDecl*>(D));
}
static ExportDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<ExportDecl *>(const_cast<DeclContext*>(DC));
}
};
/// Represents an empty-declaration.
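/// For example, a stray semicolon at file scope:
/// \code
///   ;
/// \endcode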
class EmptyDecl : public Decl {
EmptyDecl(DeclContext *DC, SourceLocation L) : Decl(Empty, DC, L) {}
virtual void anchor();
public:
static EmptyDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L);
static EmptyDecl *CreateDeserialized(ASTContext &C, unsigned ID);
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == Empty; }
};
/// Insertion operator for diagnostics. This allows sending NamedDecls
/// into a diagnostic with <<.
inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
const NamedDecl* ND) {
DB.AddTaggedVal(reinterpret_cast<intptr_t>(ND),
DiagnosticsEngine::ak_nameddecl);
return DB;
}
inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
const NamedDecl* ND) {
PD.AddTaggedVal(reinterpret_cast<intptr_t>(ND),
DiagnosticsEngine::ak_nameddecl);
return PD;
}
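// Hedged usage sketch: these overloads let a NamedDecl be streamed straight
// into a diagnostic, with ak_nameddecl telling the engine to render the
// declaration's name. Assuming a Sema &S, a SourceLocation Loc, a
// NamedDecl *ND, and a purely hypothetical diagnostic ID:
//   S.Diag(Loc, diag::err_example_conflict) << ND; // prints ND's name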
template<typename decl_type>
void Redeclarable<decl_type>::setPreviousDecl(decl_type *PrevDecl) {
// Note: This routine is implemented here because we need both NamedDecl
// and Redeclarable to be defined.
assert(RedeclLink.isFirst() &&
"setPreviousDecl on a decl already in a redeclaration chain");
if (PrevDecl) {
// Point to previous. Make sure that this is actually the most recent
// redeclaration, or we can build invalid chains. If the most recent
// redeclaration is invalid, it won't be PrevDecl, but we want it anyway.
First = PrevDecl->getFirstDecl();
assert(First->RedeclLink.isFirst() && "Expected first");
decl_type *MostRecent = First->getNextRedeclaration();
RedeclLink = PreviousDeclLink(cast<decl_type>(MostRecent));
// If the declaration was previously visible, a redeclaration of it remains
// visible even if it wouldn't be visible by itself.
static_cast<decl_type*>(this)->IdentifierNamespace |=
MostRecent->getIdentifierNamespace() &
(Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Type);
} else {
// Make this first.
First = static_cast<decl_type*>(this);
}
// First one will point to this one as latest.
First->RedeclLink.setLatest(static_cast<decl_type*>(this));
assert(!isa<NamedDecl>(static_cast<decl_type*>(this)) ||
cast<NamedDecl>(static_cast<decl_type*>(this))->isLinkageValid());
}
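// Toy model of the invariant maintained above (illustrative only, not clang
// code): all redecls share one canonical First, non-first decls point back
// to the previous redeclaration, and First points forward to the latest.
//   struct ToyDecl {
//     ToyDecl *First = this;        // canonical first decl of the chain
//     ToyDecl *PrevOrLatest = this; // previous redecl; for First, the latest
//   };
//   void toySetPrevious(ToyDecl *D, ToyDecl *Prev) {
//     D->First = Prev->First;                   // share the canonical first
//     D->PrevOrLatest = D->First->PrevOrLatest; // previous = current latest
//     D->First->PrevOrLatest = D;               // D becomes the latest
//   }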
// Inline function definitions.
/// Check if the given decl is complete.
///
/// We use this function to break a cycle between the inline definitions in
/// Type.h and Decl.h.
inline bool IsEnumDeclComplete(EnumDecl *ED) {
return ED->isComplete();
}
/// Check if the given decl is scoped.
///
/// We use this function to break a cycle between the inline definitions in
/// Type.h and Decl.h.
inline bool IsEnumDeclScoped(EnumDecl *ED) {
return ED->isScoped();
}
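// Hedged sketch of the cycle break with toy names (not the real headers):
// a Type.h-like header needs only a forward declaration of the enum type
// plus these free functions, while the inline bodies live where the type
// is complete, mirroring IsEnumDeclComplete/IsEnumDeclScoped above.
//   // toy_type.h:
//   struct ToyEnum;
//   bool isToyEnumComplete(ToyEnum *E);
//   // toy_decl.h:
//   struct ToyEnum { bool Complete = false; };
//   inline bool isToyEnumComplete(ToyEnum *E) { return E->Complete; }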
} // namespace clang
#endif // LLVM_CLANG_AST_DECL_H
Index: projects/clang700-import/contrib/llvm/tools/clang/include/clang/Driver/Options.td
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/include/clang/Driver/Options.td (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/include/clang/Driver/Options.td (revision 340125)
@@ -1,3013 +1,3013 @@
//===--- Options.td - Options for clang -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the options accepted by clang.
//
//===----------------------------------------------------------------------===//
// Include the common option parsing interfaces.
include "llvm/Option/OptParser.td"
/////////
// Flags
// DriverOption - The option is a "driver" option, and should not be forwarded
// to other tools.
def DriverOption : OptionFlag;
// LinkerInput - The option is a linker input.
def LinkerInput : OptionFlag;
// NoArgumentUnused - Don't report argument unused warnings for this option; this
// is useful for options like -static or -dynamic which a user may always end up
// passing, even if the platform defaults to (or only supports) that option.
def NoArgumentUnused : OptionFlag;
// Unsupported - The option is unsupported, and the driver will reject command
// lines that use it.
def Unsupported : OptionFlag;
// Ignored - The option is unsupported, and the driver will silently ignore it.
def Ignored : OptionFlag;
// CoreOption - This is considered a "core" Clang option, available in both
// clang and clang-cl modes.
def CoreOption : OptionFlag;
// CLOption - This is a cl.exe compatibility option. Options with this flag
// are made available when the driver is running in CL compatibility mode.
def CLOption : OptionFlag;
// CC1Option - This option should be accepted by clang -cc1.
def CC1Option : OptionFlag;
// CC1AsOption - This option should be accepted by clang -cc1as.
def CC1AsOption : OptionFlag;
// NoDriverOption - This option should not be accepted by the driver.
def NoDriverOption : OptionFlag;
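// For example, a hypothetical option accepted both by the driver (in clang
// and clang-cl modes) and by clang -cc1 could combine these flags:
//   def fexample : Flag<["-"], "fexample">, Flags<[CC1Option, CoreOption]>,
//     HelpText<"Illustrative only; not a real option">;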
// A short name to show in documentation. The name will be interpreted as rST.
class DocName<string name> { string DocName = name; }
// A brief description to show in documentation, interpreted as rST.
class DocBrief<code descr> { code DocBrief = descr; }
// Indicates that this group should be flattened into its parent when generating
// documentation.
class DocFlatten { bit DocFlatten = 1; }
// Indicates that this warning is ignored, but accepted with a warning for
// GCC compatibility.
class IgnoredGCCCompat : Flags<[HelpHidden]> {}
/////////
// Groups
def Action_Group : OptionGroup<"<action group>">, DocName<"Actions">,
DocBrief<[{The action to perform on the input.}]>;
// Meta-group for options which are only used for compilation,
// and not linking etc.
def CompileOnly_Group : OptionGroup<"<CompileOnly group>">,
DocName<"Compilation flags">, DocBrief<[{
Flags controlling the behavior of Clang during compilation. These flags have
no effect during actions that do not perform compilation.}]>;
def Preprocessor_Group : OptionGroup<"<Preprocessor group>">,
Group<CompileOnly_Group>,
DocName<"Preprocessor flags">, DocBrief<[{
Flags controlling the behavior of the Clang preprocessor.}]>;
def IncludePath_Group : OptionGroup<"<I/i group>">, Group<Preprocessor_Group>,
DocName<"Include path management">,
DocBrief<[{
Flags controlling how ``#include``\s are resolved to files.}]>;
def I_Group : OptionGroup<"<I group>">, Group<IncludePath_Group>, DocFlatten;
def i_Group : OptionGroup<"<i group>">, Group<IncludePath_Group>, DocFlatten;
def clang_i_Group : OptionGroup<"<clang i group>">, Group<i_Group>, DocFlatten;
def M_Group : OptionGroup<"<M group>">, Group<Preprocessor_Group>,
DocName<"Dependency file generation">, DocBrief<[{
Flags controlling generation of a dependency file for ``make``-like build
systems.}]>;
def d_Group : OptionGroup<"<d group>">, Group<Preprocessor_Group>,
DocName<"Dumping preprocessor state">, DocBrief<[{
Flags allowing the state of the preprocessor to be dumped in various ways.}]>;
def Diag_Group : OptionGroup<"<W/R group>">, Group<CompileOnly_Group>,
DocName<"Diagnostic flags">, DocBrief<[{
Flags controlling which warnings, errors, and remarks Clang will generate.
See the :doc:`full list of warning and remark flags <DiagnosticsReference>`.}]>;
def R_Group : OptionGroup<"<R group>">, Group<Diag_Group>, DocFlatten;
def R_value_Group : OptionGroup<"<R (with value) group>">, Group<R_Group>,
DocFlatten;
def W_Group : OptionGroup<"<W group>">, Group<Diag_Group>, DocFlatten;
def W_value_Group : OptionGroup<"<W (with value) group>">, Group<W_Group>,
DocFlatten;
def f_Group : OptionGroup<"<f group>">, Group<CompileOnly_Group>,
DocName<"Target-independent compilation options">;
def f_clang_Group : OptionGroup<"<f (clang-only) group>">,
Group<CompileOnly_Group>, DocFlatten;
def pedantic_Group : OptionGroup<"<pedantic group>">, Group<f_Group>,
DocFlatten;
def opencl_Group : OptionGroup<"<opencl group>">, Group<f_Group>,
DocName<"OpenCL flags">;
def m_Group : OptionGroup<"<m group>">, Group<CompileOnly_Group>,
DocName<"Target-dependent compilation options">;
// Feature groups - these take command line options that correspond directly
// to target-specific features and can be translated straight into target
// features.
def m_aarch64_Features_Group : OptionGroup<"<aarch64 features group>">,
Group<m_Group>, DocName<"AARCH64">;
def m_amdgpu_Features_Group : OptionGroup<"<amdgpu features group>">,
Group<m_Group>, DocName<"AMDGPU">;
def m_arm_Features_Group : OptionGroup<"<arm features group>">,
Group<m_Group>, DocName<"ARM">;
def m_hexagon_Features_Group : OptionGroup<"<hexagon features group>">,
Group<m_Group>, DocName<"Hexagon">;
// The features added by this group will not be added to target features.
// These are explicitly handled.
def m_hexagon_Features_HVX_Group : OptionGroup<"<hexagon features group>">,
Group<m_Group>, DocName<"Hexagon">;
def m_mips_Features_Group : OptionGroup<"<mips features group>">,
Group<m_Group>, DocName<"MIPS">;
def m_ppc_Features_Group : OptionGroup<"<ppc features group>">,
Group<m_Group>, DocName<"PowerPC">;
def m_wasm_Features_Group : OptionGroup<"<wasm features group>">,
Group<m_Group>, DocName<"WebAssembly">;
def m_x86_Features_Group : OptionGroup<"<x86 features group>">,
Group<m_Group>, Flags<[CoreOption]>, DocName<"X86">;
def m_riscv_Features_Group : OptionGroup<"<riscv features group>">,
Group<m_Group>, DocName<"RISCV">;
def m_libc_Group : OptionGroup<"<m libc group>">, Group<m_mips_Features_Group>,
Flags<[HelpHidden]>;
def O_Group : OptionGroup<"<O group>">, Group<CompileOnly_Group>,
DocName<"Optimization level">, DocBrief<[{
Flags controlling how much optimization should be performed.}]>;
def DebugInfo_Group : OptionGroup<"<g group>">, Group<CompileOnly_Group>,
DocName<"Debug information generation">, DocBrief<[{
Flags controlling how much and what kind of debug information should be
generated.}]>;
def g_Group : OptionGroup<"<g group>">, Group<DebugInfo_Group>,
DocName<"Kind and level of debug information">;
def gN_Group : OptionGroup<"<gN group>">, Group<g_Group>,
DocName<"Debug level">;
def ggdbN_Group : OptionGroup<"<ggdbN group>">, Group<gN_Group>, DocFlatten;
def gTune_Group : OptionGroup<"<gTune group>">, Group<g_Group>,
DocName<"Debugger to tune debug information for">;
def g_flags_Group : OptionGroup<"<g flags group>">, Group<DebugInfo_Group>,
DocName<"Debug information flags">;
def StaticAnalyzer_Group : OptionGroup<"<Static analyzer group>">,
DocName<"Static analyzer flags">, DocBrief<[{
Flags controlling the behavior of the Clang Static Analyzer.}]>;
// gfortran options that we recognize in the driver and pass along when
// invoking GCC to compile Fortran code.
def gfortran_Group : OptionGroup<"<gfortran group>">,
DocName<"Fortran compilation flags">, DocBrief<[{
Flags that will be passed on to the ``gfortran`` compiler when Clang is given
a Fortran input.}]>;
def Link_Group : OptionGroup<"<T/e/s/t/u group>">, DocName<"Linker flags">,
DocBrief<[{Flags that are passed on to the linker}]>;
def T_Group : OptionGroup<"<T group>">, Group<Link_Group>, DocFlatten;
def u_Group : OptionGroup<"<u group>">, Group<Link_Group>, DocFlatten;
def reserved_lib_Group : OptionGroup<"<reserved libs group>">,
Flags<[Unsupported]>;
// Temporary groups for clang options which we know we don't support,
// but don't want to verbosely warn the user about.
def clang_ignored_f_Group : OptionGroup<"<clang ignored f group>">,
Group<f_Group>, Flags<[Ignored]>;
def clang_ignored_m_Group : OptionGroup<"<clang ignored m group>">,
Group<m_Group>, Flags<[Ignored]>;
// Group for clang options in the process of deprecation.
// Please include the version that deprecated the flag as a comment to allow
// easier garbage collection.
def clang_ignored_legacy_options_Group : OptionGroup<"<clang legacy flags>">,
Group<f_Group>, Flags<[Ignored]>;
// Retired with clang-5.0
def : Flag<["-"], "fslp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
def : Flag<["-"], "fno-slp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
// Group that ignores all GCC optimizations that won't be implemented.
def clang_ignored_gcc_optimization_f_Group : OptionGroup<
"<clang_ignored_gcc_optimization_f_Group>">, Group<f_Group>, Flags<[Ignored]>;
/////////
// Options
// The internal option ID must be a valid C++ identifier and results in a
// clang::driver::options::OPT_XX enum constant for XX.
//
// We want to unambiguously be able to refer to options from the driver source
// code; for this reason the option name is mangled into an ID. This mangling
// isn't guaranteed to have an inverse, but for practical purposes it does.
//
// The mangling scheme is to ignore the leading '-', and perform the following
// substitutions:
// _ => __
// - => _
// / => _SLASH
// # => _HASH
// ? => _QUESTION
// , => _COMMA
// = => _EQ
// C++ => CXX
// . => _
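// A few worked examples against definitions appearing later in this file:
//   -fsanitize=  =>  fsanitize_EQ       (leading '-' dropped, '=' => _EQ)
//   -Wl,         =>  Wl_COMMA           (',' => _COMMA)
//   -###         =>  _HASH_HASH_HASH    (each '#' => _HASH)
// yielding OPT_fsanitize_EQ, OPT_Wl_COMMA, and OPT__HASH_HASH_HASH.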
// Developer Driver Options
def internal_Group : OptionGroup<"<clang internal options>">, Flags<[HelpHidden]>;
def internal_driver_Group : OptionGroup<"<clang driver internal options>">,
Group<internal_Group>, HelpText<"DRIVER OPTIONS">;
def internal_debug_Group :
OptionGroup<"<clang debug/development internal options>">,
Group<internal_Group>, HelpText<"DEBUG/DEVELOPMENT OPTIONS">;
class InternalDriverOpt : Group<internal_driver_Group>,
Flags<[DriverOption, HelpHidden]>;
def driver_mode : Joined<["--"], "driver-mode=">, Group<internal_driver_Group>,
Flags<[CoreOption, DriverOption, HelpHidden]>,
HelpText<"Set the driver mode to either 'gcc', 'g++', 'cpp', or 'cl'">;
def rsp_quoting : Joined<["--"], "rsp-quoting=">, Group<internal_driver_Group>,
Flags<[CoreOption, DriverOption, HelpHidden]>,
HelpText<"Set the rsp quoting to either 'posix', or 'windows'">;
def ccc_gcc_name : Separate<["-"], "ccc-gcc-name">, InternalDriverOpt,
HelpText<"Name for native GCC compiler">,
MetaVarName<"<gcc-path>">;
def ccc_pch_is_pch : Flag<["-"], "ccc-pch-is-pch">, InternalDriverOpt,
HelpText<"Use lazy PCH for precompiled headers">;
def ccc_pch_is_pth : Flag<["-"], "ccc-pch-is-pth">, InternalDriverOpt,
HelpText<"Use pretokenized headers for precompiled headers">;
class InternalDebugOpt : Group<internal_debug_Group>,
Flags<[DriverOption, HelpHidden, CoreOption]>;
def ccc_install_dir : Separate<["-"], "ccc-install-dir">, InternalDebugOpt,
HelpText<"Simulate installation in the given directory">;
def ccc_print_phases : Flag<["-"], "ccc-print-phases">, InternalDebugOpt,
HelpText<"Dump list of actions to perform">;
def ccc_print_bindings : Flag<["-"], "ccc-print-bindings">, InternalDebugOpt,
HelpText<"Show bindings of tools to actions">;
def ccc_arcmt_check : Flag<["-"], "ccc-arcmt-check">, InternalDriverOpt,
HelpText<"Check for ARC migration issues that need manual handling">;
def ccc_arcmt_modify : Flag<["-"], "ccc-arcmt-modify">, InternalDriverOpt,
HelpText<"Apply modifications to files to conform to ARC">;
def ccc_arcmt_migrate : Separate<["-"], "ccc-arcmt-migrate">, InternalDriverOpt,
HelpText<"Apply modifications and produces temporary files that conform to ARC">;
def arcmt_migrate_report_output : Separate<["-"], "arcmt-migrate-report-output">,
HelpText<"Output path for the plist report">, Flags<[CC1Option]>;
def arcmt_migrate_emit_arc_errors : Flag<["-"], "arcmt-migrate-emit-errors">,
HelpText<"Emit ARC errors even if the migrator can fix them">,
Flags<[CC1Option]>;
def gen_reproducer: Flag<["-"], "gen-reproducer">, InternalDebugOpt,
HelpText<"Auto-generates preprocessed source files and a reproduction script">;
def _migrate : Flag<["--"], "migrate">, Flags<[DriverOption]>,
HelpText<"Run the migrator">;
def ccc_objcmt_migrate : Separate<["-"], "ccc-objcmt-migrate">,
InternalDriverOpt,
HelpText<"Apply modifications and produces temporary files to migrate to "
"modern ObjC syntax">;
def objcmt_migrate_literals : Flag<["-"], "objcmt-migrate-literals">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC literals">;
def objcmt_migrate_subscripting : Flag<["-"], "objcmt-migrate-subscripting">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC subscripting">;
def objcmt_migrate_property : Flag<["-"], "objcmt-migrate-property">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC property">;
def objcmt_migrate_all : Flag<["-"], "objcmt-migrate-all">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC">;
def objcmt_migrate_readonly_property : Flag<["-"], "objcmt-migrate-readonly-property">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC readonly property">;
def objcmt_migrate_readwrite_property : Flag<["-"], "objcmt-migrate-readwrite-property">, Flags<[CC1Option]>,
HelpText<"Enable migration to modern ObjC readwrite property">;
def objcmt_migrate_property_dot_syntax : Flag<["-"], "objcmt-migrate-property-dot-syntax">, Flags<[CC1Option]>,
HelpText<"Enable migration of setter/getter messages to property-dot syntax">;
def objcmt_migrate_annotation : Flag<["-"], "objcmt-migrate-annotation">, Flags<[CC1Option]>,
HelpText<"Enable migration to property and method annotations">;
def objcmt_migrate_instancetype : Flag<["-"], "objcmt-migrate-instancetype">, Flags<[CC1Option]>,
HelpText<"Enable migration to infer instancetype for method result type">;
def objcmt_migrate_nsmacros : Flag<["-"], "objcmt-migrate-ns-macros">, Flags<[CC1Option]>,
HelpText<"Enable migration to NS_ENUM/NS_OPTIONS macros">;
def objcmt_migrate_protocol_conformance : Flag<["-"], "objcmt-migrate-protocol-conformance">, Flags<[CC1Option]>,
HelpText<"Enable migration to add protocol conformance on classes">;
def objcmt_atomic_property : Flag<["-"], "objcmt-atomic-property">, Flags<[CC1Option]>,
HelpText<"Make migration to 'atomic' properties">;
def objcmt_returns_innerpointer_property : Flag<["-"], "objcmt-returns-innerpointer-property">, Flags<[CC1Option]>,
HelpText<"Enable migration to annotate property with NS_RETURNS_INNER_POINTER">;
def objcmt_ns_nonatomic_iosonly: Flag<["-"], "objcmt-ns-nonatomic-iosonly">, Flags<[CC1Option]>,
HelpText<"Enable migration to use NS_NONATOMIC_IOSONLY macro for setting property's 'atomic' attribute">;
def objcmt_migrate_designated_init : Flag<["-"], "objcmt-migrate-designated-init">, Flags<[CC1Option]>,
HelpText<"Enable migration to infer NS_DESIGNATED_INITIALIZER for initializer methods">;
def objcmt_whitelist_dir_path: Joined<["-"], "objcmt-whitelist-dir-path=">, Flags<[CC1Option]>,
HelpText<"Only modify files with a filename contained in the provided directory path">;
// The misspelt "white-list" [sic] alias is due for removal.
def : Joined<["-"], "objcmt-white-list-dir-path=">, Flags<[CC1Option]>,
Alias<objcmt_whitelist_dir_path>;
// Make sure all other -ccc- options are rejected.
def ccc_ : Joined<["-"], "ccc-">, Group<internal_Group>, Flags<[Unsupported]>;
// Standard Options
def _HASH_HASH_HASH : Flag<["-"], "###">, Flags<[DriverOption, CoreOption]>,
HelpText<"Print (but do not run) the commands to run for this compilation">;
def _DASH_DASH : Option<["--"], "", KIND_REMAINING_ARGS>,
Flags<[DriverOption, CoreOption]>;
def A : JoinedOrSeparate<["-"], "A">, Flags<[RenderJoined]>, Group<gfortran_Group>;
def B : JoinedOrSeparate<["-"], "B">, MetaVarName<"<dir>">,
HelpText<"Add <dir> to search path for binaries and object files used implicitly">;
def CC : Flag<["-"], "CC">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Include comments from within macros in preprocessed output">;
def C : Flag<["-"], "C">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Include comments in preprocessed output">;
def D : JoinedOrSeparate<["-"], "D">, Group<Preprocessor_Group>,
Flags<[CC1Option]>, MetaVarName<"<macro>=<value>">,
HelpText<"Define <macro> to <value> (or 1 if <value> omitted)">;
def E : Flag<["-"], "E">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Only run the preprocessor">;
def F : JoinedOrSeparate<["-"], "F">, Flags<[RenderJoined,CC1Option]>,
HelpText<"Add directory to framework include search path">;
def G : JoinedOrSeparate<["-"], "G">, Flags<[DriverOption]>, Group<m_Group>,
MetaVarName<"<size>">, HelpText<"Put objects of at most <size> bytes "
"into small data section (MIPS / Hexagon)">;
def G_EQ : Joined<["-"], "G=">, Flags<[DriverOption]>, Group<m_Group>, Alias<G>;
def H : Flag<["-"], "H">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Show header includes and nesting depth">;
def I_ : Flag<["-"], "I-">, Group<I_Group>,
HelpText<"Restrict all prior -I flags to double-quoted inclusion and "
"remove current directory from include path">;
def I : JoinedOrSeparate<["-"], "I">, Group<I_Group>,
Flags<[CC1Option,CC1AsOption]>, MetaVarName<"<dir>">,
HelpText<"Add directory to include search path">;
def L : JoinedOrSeparate<["-"], "L">, Flags<[RenderJoined]>, Group<Link_Group>,
MetaVarName<"<dir>">, HelpText<"Add directory to library search path">;
def MD : Flag<["-"], "MD">, Group<M_Group>,
HelpText<"Write a depfile containing user and system headers">;
def MMD : Flag<["-"], "MMD">, Group<M_Group>,
HelpText<"Write a depfile containing user headers">;
def M : Flag<["-"], "M">, Group<M_Group>,
HelpText<"Like -MD, but also implies -E and writes to stdout by default">;
def MM : Flag<["-"], "MM">, Group<M_Group>,
HelpText<"Like -MMD, but also implies -E and writes to stdout by default">;
def MF : JoinedOrSeparate<["-"], "MF">, Group<M_Group>,
HelpText<"Write depfile output from -MMD, -MD, -MM, or -M to <file>">,
MetaVarName<"<file>">;
def MG : Flag<["-"], "MG">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Add missing headers to depfile">;
def MJ : JoinedOrSeparate<["-"], "MJ">, Group<M_Group>,
HelpText<"Write a compilation database entry per input">;
def MP : Flag<["-"], "MP">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Create phony target for each dependency (other than main file)">;
def MQ : JoinedOrSeparate<["-"], "MQ">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Specify name of main file output to quote in depfile">;
def MT : JoinedOrSeparate<["-"], "MT">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Specify name of main file output in depfile">;
def MV : Flag<["-"], "MV">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Use NMake/Jom format for the depfile">;
def Mach : Flag<["-"], "Mach">, Group<Link_Group>;
def O0 : Flag<["-"], "O0">, Group<O_Group>, Flags<[CC1Option, HelpHidden]>;
def O4 : Flag<["-"], "O4">, Group<O_Group>, Flags<[CC1Option, HelpHidden]>;
def ObjCXX : Flag<["-"], "ObjC++">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C++ inputs">;
def ObjC : Flag<["-"], "ObjC">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C inputs">;
def O : Joined<["-"], "O">, Group<O_Group>, Flags<[CC1Option]>;
def O_flag : Flag<["-"], "O">, Flags<[CC1Option]>, Alias<O>, AliasArgs<["2"]>;
def Ofast : Joined<["-"], "Ofast">, Group<O_Group>, Flags<[CC1Option]>;
def P : Flag<["-"], "P">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Disable linemarker output in -E mode">;
def Qy : Flag<["-"], "Qy">, Flags<[CC1Option]>,
HelpText<"Emit metadata containing compiler name and version">;
def Qn : Flag<["-"], "Qn">, Flags<[CC1Option]>,
HelpText<"Do not emit metadata containing compiler name and version">;
def : Flag<["-"], "fident">, Group<f_Group>, Alias<Qy>, Flags<[CC1Option]>;
def : Flag<["-"], "fno-ident">, Group<f_Group>, Alias<Qn>, Flags<[CC1Option]>;
def Qunused_arguments : Flag<["-"], "Qunused-arguments">, Flags<[DriverOption, CoreOption]>,
HelpText<"Don't emit warning for unused driver arguments">;
def Q : Flag<["-"], "Q">, IgnoredGCCCompat;
def Rpass_EQ : Joined<["-"], "Rpass=">, Group<R_value_Group>, Flags<[CC1Option]>,
HelpText<"Report transformations performed by optimization passes whose "
"name matches the given POSIX regular expression">;
def Rpass_missed_EQ : Joined<["-"], "Rpass-missed=">, Group<R_value_Group>,
Flags<[CC1Option]>,
HelpText<"Report missed transformations by optimization passes whose "
"name matches the given POSIX regular expression">;
def Rpass_analysis_EQ : Joined<["-"], "Rpass-analysis=">, Group<R_value_Group>,
Flags<[CC1Option]>,
HelpText<"Report transformation analysis from optimization passes whose "
"name matches the given POSIX regular expression">;
def R_Joined : Joined<["-"], "R">, Group<R_Group>, Flags<[CC1Option, CoreOption]>,
MetaVarName<"<remark>">, HelpText<"Enable the specified remark">;
def S : Flag<["-"], "S">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Only run preprocess and compilation steps">;
def Tbss : JoinedOrSeparate<["-"], "Tbss">, Group<T_Group>,
MetaVarName<"<addr>">, HelpText<"Set starting address of BSS to <addr>">;
def Tdata : JoinedOrSeparate<["-"], "Tdata">, Group<T_Group>,
MetaVarName<"<addr>">, HelpText<"Set starting address of DATA to <addr>">;
def Ttext : JoinedOrSeparate<["-"], "Ttext">, Group<T_Group>,
MetaVarName<"<addr>">, HelpText<"Set starting address of TEXT to <addr>">;
def T : JoinedOrSeparate<["-"], "T">, Group<T_Group>,
MetaVarName<"<script>">, HelpText<"Specify <script> as linker script">;
def U : JoinedOrSeparate<["-"], "U">, Group<Preprocessor_Group>,
Flags<[CC1Option]>, MetaVarName<"<macro>">, HelpText<"Undefine macro <macro>">;
def V : JoinedOrSeparate<["-"], "V">, Flags<[DriverOption, Unsupported]>;
def Wa_COMMA : CommaJoined<["-"], "Wa,">,
HelpText<"Pass the comma separated arguments in <arg> to the assembler">,
MetaVarName<"<arg>">;
def Wall : Flag<["-"], "Wall">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def WCL4 : Flag<["-"], "WCL4">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def Wdeprecated : Flag<["-"], "Wdeprecated">, Group<W_Group>, Flags<[CC1Option]>,
HelpText<"Enable warnings for deprecated constructs and define __DEPRECATED">;
def Wno_deprecated : Flag<["-"], "Wno-deprecated">, Group<W_Group>, Flags<[CC1Option]>;
def Wl_COMMA : CommaJoined<["-"], "Wl,">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass the comma separated arguments in <arg> to the linker">,
MetaVarName<"<arg>">, Group<Link_Group>;
// FIXME: This is broken; these should not be Joined arguments.
def Wno_nonportable_cfstrings : Joined<["-"], "Wno-nonportable-cfstrings">, Group<W_Group>,
Flags<[CC1Option]>;
def Wnonportable_cfstrings : Joined<["-"], "Wnonportable-cfstrings">, Group<W_Group>,
Flags<[CC1Option]>;
def Wp_COMMA : CommaJoined<["-"], "Wp,">,
HelpText<"Pass the comma separated arguments in <arg> to the preprocessor">,
MetaVarName<"<arg>">, Group<Preprocessor_Group>;
def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def W_Joined : Joined<["-"], "W">, Group<W_Group>, Flags<[CC1Option, CoreOption]>,
MetaVarName<"<warning>">, HelpText<"Enable the specified warning">;
def Xanalyzer : Separate<["-"], "Xanalyzer">,
HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">,
Group<StaticAnalyzer_Group>;
def Xarch__ : JoinedAndSeparate<["-"], "Xarch_">, Flags<[DriverOption]>;
def Xassembler : Separate<["-"], "Xassembler">,
HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">,
Group<CompileOnly_Group>;
def Xclang : Separate<["-"], "Xclang">,
HelpText<"Pass <arg> to the clang compiler">, MetaVarName<"<arg>">,
Flags<[DriverOption, CoreOption]>, Group<CompileOnly_Group>;
def Xcuda_fatbinary : Separate<["-"], "Xcuda-fatbinary">,
HelpText<"Pass <arg> to fatbinary invocation">, MetaVarName<"<arg>">;
def Xcuda_ptxas : Separate<["-"], "Xcuda-ptxas">,
HelpText<"Pass <arg> to the ptxas assembler">, MetaVarName<"<arg>">;
def Xopenmp_target : Separate<["-"], "Xopenmp-target">,
HelpText<"Pass <arg> to the target offloading toolchain.">, MetaVarName<"<arg>">;
def Xopenmp_target_EQ : JoinedAndSeparate<["-"], "Xopenmp-target=">,
HelpText<"Pass <arg> to the target offloading toolchain identified by <triple>.">,
MetaVarName<"<triple> <arg>">;
def z : Separate<["-"], "z">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass -z <arg> to the linker">, MetaVarName<"<arg>">,
Group<Link_Group>;
def Xlinker : Separate<["-"], "Xlinker">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass <arg> to the linker">, MetaVarName<"<arg>">,
Group<Link_Group>;
def Xpreprocessor : Separate<["-"], "Xpreprocessor">, Group<Preprocessor_Group>,
HelpText<"Pass <arg> to the preprocessor">, MetaVarName<"<arg>">;
def X_Flag : Flag<["-"], "X">, Group<Link_Group>;
def X_Joined : Joined<["-"], "X">, IgnoredGCCCompat;
def Z_Flag : Flag<["-"], "Z">, Group<Link_Group>;
// FIXME: All we do with this is reject it. Remove.
def Z_Joined : Joined<["-"], "Z">;
def all__load : Flag<["-"], "all_load">;
def allowable__client : Separate<["-"], "allowable_client">;
def ansi : Flag<["-", "--"], "ansi">;
def arch__errors__fatal : Flag<["-"], "arch_errors_fatal">;
def arch : Separate<["-"], "arch">, Flags<[DriverOption]>;
def arch__only : Separate<["-"], "arch_only">;
def a : Joined<["-"], "a">;
def autocomplete : Joined<["--"], "autocomplete=">;
def bind__at__load : Flag<["-"], "bind_at_load">;
def bundle__loader : Separate<["-"], "bundle_loader">;
def bundle : Flag<["-"], "bundle">;
def b : JoinedOrSeparate<["-"], "b">, Flags<[Unsupported]>;
def cfguard : Flag<["-"], "cfguard">, Flags<[CC1Option]>,
HelpText<"Emit tables required for Windows Control Flow Guard.">;
def cl_opt_disable : Flag<["-"], "cl-opt-disable">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. This option disables all optimizations. By default optimizations are enabled.">;
def cl_strict_aliasing : Flag<["-"], "cl-strict-aliasing">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. This option is added for compatibility with OpenCL 1.0.">;
def cl_single_precision_constant : Flag<["-"], "cl-single-precision-constant">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Treat double precision floating-point constant as single precision constant.">;
def cl_finite_math_only : Flag<["-"], "cl-finite-math-only">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow floating-point optimizations that assume arguments and results are not NaNs or +-Inf.">;
def cl_kernel_arg_info : Flag<["-"], "cl-kernel-arg-info">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Generate kernel argument metadata.">;
def cl_unsafe_math_optimizations : Flag<["-"], "cl-unsafe-math-optimizations">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow unsafe floating-point optimizations. Also implies -cl-no-signed-zeros and -cl-mad-enable.">;
def cl_fast_relaxed_math : Flag<["-"], "cl-fast-relaxed-math">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Sets -cl-finite-math-only and -cl-unsafe-math-optimizations, and defines __FAST_RELAXED_MATH__.">;
def cl_mad_enable : Flag<["-"], "cl-mad-enable">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow use of less precise MAD computations in the generated binary.">;
def cl_no_signed_zeros : Flag<["-"], "cl-no-signed-zeros">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow use of less precise no signed zeros computations in the generated binary.">;
def cl_std_EQ : Joined<["-"], "cl-std=">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL language standard to compile for.">, Values<"cl,CL,cl1.1,CL1.1,cl1.2,CL1.2,cl2.0,CL2.0,c++">;
def cl_denorms_are_zero : Flag<["-"], "cl-denorms-are-zero">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow denormals to be flushed to zero.">;
def cl_fp32_correctly_rounded_divide_sqrt : Flag<["-"], "cl-fp32-correctly-rounded-divide-sqrt">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Specify that single precision floating-point divide and sqrt used in the program source are correctly rounded.">;
def cl_uniform_work_group_size : Flag<["-"], "cl-uniform-work-group-size">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Defines that the global work-size be a multiple of the work-group size specified to clEnqueueNDRangeKernel">;
def client__name : JoinedOrSeparate<["-"], "client_name">;
def combine : Flag<["-", "--"], "combine">, Flags<[DriverOption, Unsupported]>;
def compatibility__version : JoinedOrSeparate<["-"], "compatibility_version">;
def config : Separate<["--"], "config">, Flags<[DriverOption]>,
HelpText<"Specifies configuration file">;
def config_system_dir_EQ : Joined<["--"], "config-system-dir=">, Flags<[DriverOption, HelpHidden]>,
HelpText<"System directory for configuration files">;
def config_user_dir_EQ : Joined<["--"], "config-user-dir=">, Flags<[DriverOption, HelpHidden]>,
HelpText<"User directory for configuration files">;
def coverage : Flag<["-", "--"], "coverage">, Flags<[CoreOption]>;
def cpp_precomp : Flag<["-"], "cpp-precomp">, Group<clang_ignored_f_Group>;
def current__version : JoinedOrSeparate<["-"], "current_version">;
def cxx_isystem : JoinedOrSeparate<["-"], "cxx-isystem">, Group<clang_i_Group>,
HelpText<"Add directory to the C++ SYSTEM include search path">, Flags<[CC1Option]>,
MetaVarName<"<directory>">;
def c : Flag<["-"], "c">, Flags<[DriverOption]>, Group<Action_Group>,
HelpText<"Only run preprocess, compile, and assemble steps">;
def cuda_device_only : Flag<["--"], "cuda-device-only">,
HelpText<"Compile CUDA code for device only">;
def cuda_host_only : Flag<["--"], "cuda-host-only">,
HelpText<"Compile CUDA code for host only. Has no effect on non-CUDA "
"compilations.">;
def cuda_compile_host_device : Flag<["--"], "cuda-compile-host-device">,
HelpText<"Compile CUDA code for both host and device (default). Has no "
"effect on non-CUDA compilations.">;
def cuda_include_ptx_EQ : Joined<["--"], "cuda-include-ptx=">, Flags<[DriverOption]>,
HelpText<"Include PTX for the follwing GPU architecture (e.g. sm_35) or 'all'. May be specified more than once.">;
def no_cuda_include_ptx_EQ : Joined<["--"], "no-cuda-include-ptx=">, Flags<[DriverOption]>,
HelpText<"Do not include PTX for the follwing GPU architecture (e.g. sm_35) or 'all'. May be specified more than once.">;
def cuda_gpu_arch_EQ : Joined<["--"], "cuda-gpu-arch=">, Flags<[DriverOption]>,
HelpText<"CUDA GPU architecture (e.g. sm_35). May be specified more than once.">;
def hip_link : Flag<["--"], "hip-link">,
HelpText<"Link clang-offload-bundler bundles for HIP">;
def no_cuda_gpu_arch_EQ : Joined<["--"], "no-cuda-gpu-arch=">, Flags<[DriverOption]>,
HelpText<"Remove GPU architecture (e.g. sm_35) from the list of GPUs to compile for. "
"'all' resets the list to its default value.">;
def cuda_noopt_device_debug : Flag<["--"], "cuda-noopt-device-debug">,
HelpText<"Enable device-side debug info generation. Disables ptxas optimizations.">;
def no_cuda_version_check : Flag<["--"], "no-cuda-version-check">,
HelpText<"Don't error out if the detected version of the CUDA install is "
"too low for the requested CUDA gpu architecture.">;
def no_cuda_noopt_device_debug : Flag<["--"], "no-cuda-noopt-device-debug">;
def cuda_path_EQ : Joined<["--"], "cuda-path=">, Group<i_Group>,
HelpText<"CUDA installation path">;
def cuda_path_ignore_env : Flag<["--"], "cuda-path-ignore-env">, Group<i_Group>,
HelpText<"Ignore environment variables to detect CUDA installation">;
def ptxas_path_EQ : Joined<["--"], "ptxas-path=">, Group<i_Group>,
HelpText<"Path to ptxas (used for compiling CUDA code)">;
def fcuda_flush_denormals_to_zero : Flag<["-"], "fcuda-flush-denormals-to-zero">,
Flags<[CC1Option]>, HelpText<"Flush denormal floating point values to zero in CUDA device mode.">;
def fno_cuda_flush_denormals_to_zero : Flag<["-"], "fno-cuda-flush-denormals-to-zero">;
def fcuda_approx_transcendentals : Flag<["-"], "fcuda-approx-transcendentals">,
Flags<[CC1Option]>, HelpText<"Use approximate transcendental functions">;
def fno_cuda_approx_transcendentals : Flag<["-"], "fno-cuda-approx-transcendentals">;
def fcuda_rdc : Flag<["-"], "fcuda-rdc">, Flags<[CC1Option]>,
HelpText<"Generate relocatable device code, also known as separate compilation mode.">;
def fno_cuda_rdc : Flag<["-"], "fno-cuda-rdc">;
def fcuda_short_ptr : Flag<["-"], "fcuda-short-ptr">, Flags<[CC1Option]>,
HelpText<"Use 32-bit pointers for accessing const/local/shared address spaces.">;
def fno_cuda_short_ptr : Flag<["-"], "fno-cuda-short-ptr">;
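// Hedged example of a CUDA compile using the options above (path and
// architecture are hypothetical):
//   clang --cuda-gpu-arch=sm_35 --cuda-path=/usr/local/cuda -c axpy.cu
// compiles axpy.cu for host and device, targeting sm_35 on the device side.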
def hip_device_lib_path_EQ : Joined<["--"], "hip-device-lib-path=">, Group<Link_Group>,
HelpText<"HIP device library path">;
def hip_device_lib_EQ : Joined<["--"], "hip-device-lib=">, Group<Link_Group>,
HelpText<"HIP device library">;
def fhip_dump_offload_linker_script : Flag<["-"], "fhip-dump-offload-linker-script">,
Group<f_Group>, Flags<[NoArgumentUnused, HelpHidden]>;
def dA : Flag<["-"], "dA">, Group<d_Group>;
def dD : Flag<["-"], "dD">, Group<d_Group>, Flags<[CC1Option]>,
HelpText<"Print macro definitions in -E mode in addition to normal output">;
def dI : Flag<["-"], "dI">, Group<d_Group>, Flags<[CC1Option]>,
HelpText<"Print include directives in -E mode in addition to normal output">;
def dM : Flag<["-"], "dM">, Group<d_Group>, Flags<[CC1Option]>,
HelpText<"Print macro definitions in -E mode instead of normal output">;
def dead__strip : Flag<["-"], "dead_strip">;
def dependency_file : Separate<["-"], "dependency-file">, Flags<[CC1Option]>,
HelpText<"Filename (or -) to write dependency output to">;
def dependency_dot : Separate<["-"], "dependency-dot">, Flags<[CC1Option]>,
HelpText<"Filename to write DOT-formatted header dependencies to">;
def module_dependency_dir : Separate<["-"], "module-dependency-dir">,
Flags<[CC1Option]>, HelpText<"Directory to dump module dependencies to">;
def dumpmachine : Flag<["-"], "dumpmachine">;
def dumpspecs : Flag<["-"], "dumpspecs">, Flags<[Unsupported]>;
def dumpversion : Flag<["-"], "dumpversion">;
def dylib__file : Separate<["-"], "dylib_file">;
def dylinker__install__name : JoinedOrSeparate<["-"], "dylinker_install_name">;
def dylinker : Flag<["-"], "dylinker">;
def dynamiclib : Flag<["-"], "dynamiclib">;
def dynamic : Flag<["-"], "dynamic">, Flags<[NoArgumentUnused]>;
def d_Flag : Flag<["-"], "d">, Group<d_Group>;
def d_Joined : Joined<["-"], "d">, Group<d_Group>;
def emit_ast : Flag<["-"], "emit-ast">,
HelpText<"Emit Clang AST files for source inputs">;
def emit_llvm : Flag<["-"], "emit-llvm">, Flags<[CC1Option]>, Group<Action_Group>,
HelpText<"Use the LLVM representation for assembler and object files">;
def exported__symbols__list : Separate<["-"], "exported_symbols_list">;
def e : JoinedOrSeparate<["-"], "e">, Group<Link_Group>;
def fPIC : Flag<["-"], "fPIC">, Group<f_Group>;
def fno_PIC : Flag<["-"], "fno-PIC">, Group<f_Group>;
def fPIE : Flag<["-"], "fPIE">, Group<f_Group>;
def fno_PIE : Flag<["-"], "fno-PIE">, Group<f_Group>;
def faccess_control : Flag<["-"], "faccess-control">, Group<f_Group>;
def falign_functions : Flag<["-"], "falign-functions">, Group<f_Group>;
def falign_functions_EQ : Joined<["-"], "falign-functions=">, Group<f_Group>;
def fno_align_functions: Flag<["-"], "fno-align-functions">, Group<f_Group>;
def fallow_unsupported : Flag<["-"], "fallow-unsupported">, Group<f_Group>;
def fapple_kext : Flag<["-"], "fapple-kext">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use Apple's kernel extensions ABI">;
def fapple_pragma_pack : Flag<["-"], "fapple-pragma-pack">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable Apple gcc-compatible #pragma pack handling">;
def shared_libsan : Flag<["-"], "shared-libsan">;
def static_libsan : Flag<["-"], "static-libsan">;
def : Flag<["-"], "shared-libasan">, Alias<shared_libsan>;
def fasm : Flag<["-"], "fasm">, Group<f_Group>;
def fasm_blocks : Flag<["-"], "fasm-blocks">, Group<f_Group>, Flags<[CC1Option]>;
def fno_asm_blocks : Flag<["-"], "fno-asm-blocks">, Group<f_Group>;
def fassume_sane_operator_new : Flag<["-"], "fassume-sane-operator-new">, Group<f_Group>;
def fastcp : Flag<["-"], "fastcp">, Group<f_Group>;
def fastf : Flag<["-"], "fastf">, Group<f_Group>;
def fast : Flag<["-"], "fast">, Group<f_Group>;
def fasynchronous_unwind_tables : Flag<["-"], "fasynchronous-unwind-tables">, Group<f_Group>;
def fdouble_square_bracket_attributes : Flag<[ "-" ], "fdouble-square-bracket-attributes">,
Group<f_Group>, Flags<[DriverOption, CC1Option]>,
HelpText<"Enable '[[]]' attributes in all C and C++ language modes">;
def fno_double_square_bracket_attributes : Flag<[ "-" ], "fno-double-square-bracket-attributes">,
Group<f_Group>, Flags<[DriverOption, CC1Option]>,
HelpText<"Disable '[[]]' attributes in all C and C++ language modes">;
def fautolink : Flag <["-"], "fautolink">, Group<f_Group>;
def fno_autolink : Flag <["-"], "fno-autolink">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
HelpText<"Disable generation of linker directives for automatic library linking">;
// C++ Coroutines TS
def fcoroutines_ts : Flag <["-"], "fcoroutines-ts">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
HelpText<"Enable support for the C++ Coroutines TS">;
def fno_coroutines_ts : Flag <["-"], "fno-coroutines-ts">, Group<f_Group>,
Flags<[DriverOption]>;
def fembed_bitcode_EQ : Joined<["-"], "fembed-bitcode=">,
Group<f_Group>, Flags<[DriverOption, CC1Option]>, MetaVarName<"<option>">,
HelpText<"Embed LLVM bitcode (option: off, all, bitcode, marker)">;
def fembed_bitcode : Flag<["-"], "fembed-bitcode">, Group<f_Group>,
Alias<fembed_bitcode_EQ>, AliasArgs<["all"]>,
HelpText<"Embed LLVM IR bitcode as data">;
def fembed_bitcode_marker : Flag<["-"], "fembed-bitcode-marker">,
Alias<fembed_bitcode_EQ>, AliasArgs<["marker"]>,
HelpText<"Embed placeholder LLVM IR data as a marker">;
def fgnu_inline_asm : Flag<["-"], "fgnu-inline-asm">, Group<f_Group>, Flags<[DriverOption]>;
def fno_gnu_inline_asm : Flag<["-"], "fno-gnu-inline-asm">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
HelpText<"Disable GNU style inline asm">;
def fprofile_sample_use : Flag<["-"], "fprofile-sample-use">, Group<f_Group>,
Flags<[CoreOption]>;
def fno_profile_sample_use : Flag<["-"], "fno-profile-sample-use">, Group<f_Group>,
Flags<[CoreOption]>;
def fprofile_sample_use_EQ : Joined<["-"], "fprofile-sample-use=">,
Group<f_Group>, Flags<[DriverOption, CC1Option]>,
HelpText<"Enable sample-based profile guided optimizations">;
def fprofile_sample_accurate : Flag<["-"], "fprofile-sample-accurate">,
Group<f_Group>, Flags<[DriverOption, CC1Option]>,
HelpText<"Specifies that the sample profile is accurate">,
DocBrief<[{Specifies that the sample profile is accurate. If the sample
profile is accurate, callsites without profile samples are marked
as cold. Otherwise, callsites without profile samples are treated as if
no profile were available.}]>;
def fno_profile_sample_accurate : Flag<["-"], "fno-profile-sample-accurate">,
Group<f_Group>, Flags<[DriverOption]>;
def fauto_profile : Flag<["-"], "fauto-profile">, Group<f_Group>,
Alias<fprofile_sample_use>;
def fno_auto_profile : Flag<["-"], "fno-auto-profile">, Group<f_Group>,
Alias<fno_profile_sample_use>;
def fauto_profile_EQ : Joined<["-"], "fauto-profile=">,
Alias<fprofile_sample_use_EQ>;
def fauto_profile_accurate : Flag<["-"], "fauto-profile-accurate">,
Group<f_Group>, Alias<fprofile_sample_accurate>;
def fno_auto_profile_accurate : Flag<["-"], "fno-auto-profile-accurate">,
Group<f_Group>, Alias<fno_profile_sample_accurate>;
def fdebug_info_for_profiling : Flag<["-"], "fdebug-info-for-profiling">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Emit extra debug info to make sample profile more accurate.">;
def fno_debug_info_for_profiling : Flag<["-"], "fno-debug-info-for-profiling">, Group<f_Group>,
Flags<[DriverOption]>,
HelpText<"Do not emit extra debug info for sample profiler.">;
def fprofile_instr_generate : Flag<["-"], "fprofile-instr-generate">,
Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Generate instrumented code to collect execution counts into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
def fprofile_instr_generate_EQ : Joined<["-"], "fprofile-instr-generate=">,
Group<f_Group>, Flags<[CoreOption]>, MetaVarName<"<file>">,
HelpText<"Generate instrumented code to collect execution counts into <file> (overridden by LLVM_PROFILE_FILE env var)">;
def fprofile_instr_use : Flag<["-"], "fprofile-instr-use">, Group<f_Group>,
Flags<[CoreOption]>;
def fprofile_instr_use_EQ : Joined<["-"], "fprofile-instr-use=">,
Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Use instrumentation data for profile-guided optimization">;
def fcoverage_mapping : Flag<["-"], "fcoverage-mapping">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Generate coverage mapping to enable code coverage analysis">;
def fno_coverage_mapping : Flag<["-"], "fno-coverage-mapping">,
Group<f_Group>, Flags<[DriverOption, CoreOption]>,
HelpText<"Disable code coverage analysis">;
def fprofile_generate : Flag<["-"], "fprofile-generate">,
Group<f_Group>, Flags<[DriverOption]>,
HelpText<"Generate instrumented code to collect execution counts into default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
def fprofile_generate_EQ : Joined<["-"], "fprofile-generate=">,
Group<f_Group>, Flags<[DriverOption]>, MetaVarName<"<directory>">,
HelpText<"Generate instrumented code to collect execution counts into <directory>/default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
def fprofile_use : Flag<["-"], "fprofile-use">, Group<f_Group>,
Alias<fprofile_instr_use>;
def fprofile_use_EQ : Joined<["-"], "fprofile-use=">,
Group<f_Group>, Flags<[DriverOption]>, MetaVarName<"<pathname>">,
HelpText<"Use instrumentation data for profile-guided optimization. If pathname is a directory, it reads from <pathname>/default.profdata. Otherwise, it reads from file <pathname>.">;
def fno_profile_instr_generate : Flag<["-"], "fno-profile-instr-generate">,
Group<f_Group>, Flags<[DriverOption]>,
HelpText<"Disable generation of profile instrumentation.">;
def fno_profile_generate : Flag<["-"], "fno-profile-generate">,
Group<f_Group>, Flags<[DriverOption]>,
HelpText<"Disable generation of profile instrumentation.">;
def fno_profile_instr_use : Flag<["-"], "fno-profile-instr-use">,
Group<f_Group>, Flags<[DriverOption]>,
HelpText<"Disable using instrumentation data for profile-guided optimization">;
def fno_profile_use : Flag<["-"], "fno-profile-use">,
Alias<fno_profile_instr_use>;
def faddrsig : Flag<["-"], "faddrsig">, Group<f_Group>, Flags<[CoreOption, CC1Option]>,
HelpText<"Emit an address-significance table">;
def fno_addrsig : Flag<["-"], "fno-addrsig">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Don't emit an address-significance table">;
def fblocks : Flag<["-"], "fblocks">, Group<f_Group>, Flags<[CoreOption, CC1Option]>,
HelpText<"Enable the 'blocks' language feature">;
def fbootclasspath_EQ : Joined<["-"], "fbootclasspath=">, Group<f_Group>;
def fborland_extensions : Flag<["-"], "fborland-extensions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Accept non-standard constructs supported by the Borland compiler">;
def fbuiltin : Flag<["-"], "fbuiltin">, Group<f_Group>, Flags<[CoreOption]>;
def fbuiltin_module_map : Flag <["-"], "fbuiltin-module-map">, Group<f_Group>,
Flags<[DriverOption]>, HelpText<"Load the clang builtins module map file.">;
def fcaret_diagnostics : Flag<["-"], "fcaret-diagnostics">, Group<f_Group>;
def fclang_abi_compat_EQ : Joined<["-"], "fclang-abi-compat=">, Group<f_clang_Group>,
Flags<[CC1Option]>, MetaVarName<"<version>">, Values<"<major>.<minor>,latest">,
HelpText<"Attempt to match the ABI of Clang <version>">;
def fclasspath_EQ : Joined<["-"], "fclasspath=">, Group<f_Group>;
def fcolor_diagnostics : Flag<["-"], "fcolor-diagnostics">, Group<f_Group>,
Flags<[CoreOption, CC1Option]>, HelpText<"Use colors in diagnostics">;
def fdiagnostics_color : Flag<["-"], "fdiagnostics-color">, Group<f_Group>,
Flags<[CoreOption, DriverOption]>;
def fdiagnostics_color_EQ : Joined<["-"], "fdiagnostics-color=">, Group<f_Group>;
def fansi_escape_codes : Flag<["-"], "fansi-escape-codes">, Group<f_Group>,
Flags<[CoreOption, CC1Option]>, HelpText<"Use ANSI escape codes for diagnostics">;
def fcomment_block_commands : CommaJoined<["-"], "fcomment-block-commands=">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Treat each comma separated argument in <arg> as a documentation comment block command">,
MetaVarName<"<arg>">;
def fparse_all_comments : Flag<["-"], "fparse-all-comments">, Group<f_clang_Group>, Flags<[CC1Option]>;
def fcommon : Flag<["-"], "fcommon">, Group<f_Group>;
def fcompile_resource_EQ : Joined<["-"], "fcompile-resource=">, Group<f_Group>;
def fcomplete_member_pointers : Flag<["-"], "fcomplete-member-pointers">, Group<f_clang_Group>,
Flags<[CoreOption, CC1Option]>,
HelpText<"Require member pointer base types to be complete if they would be significant under the Microsoft ABI">;
def fno_complete_member_pointers : Flag<["-"], "fno-complete-member-pointers">, Group<f_clang_Group>,
Flags<[CoreOption]>,
HelpText<"Do not require member pointer base types to be complete if they would be significant under the Microsoft ABI">;
def fconstant_cfstrings : Flag<["-"], "fconstant-cfstrings">, Group<f_Group>;
def fconstant_string_class_EQ : Joined<["-"], "fconstant-string-class=">, Group<f_Group>;
def fconstexpr_depth_EQ : Joined<["-"], "fconstexpr-depth=">, Group<f_Group>;
def fconstexpr_steps_EQ : Joined<["-"], "fconstexpr-steps=">, Group<f_Group>;
def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">,
Group<f_Group>;
def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>,
HelpText<"Disable auto-generation of preprocessed source files and a script for reproduction during a clang crash">;
-def fcrash_diagnostics_dir : Joined<["-"], "fcrash-diagnostics-dir=">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>;
+def fcrash_diagnostics_dir : Joined<["-"], "fcrash-diagnostics-dir=">, Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>;
def fcreate_profile : Flag<["-"], "fcreate-profile">, Group<f_Group>;
def fcxx_exceptions: Flag<["-"], "fcxx-exceptions">, Group<f_Group>,
HelpText<"Enable C++ exceptions">, Flags<[CC1Option]>;
def fcxx_modules : Flag <["-"], "fcxx-modules">, Group<f_Group>,
Flags<[DriverOption]>;
def fdebug_pass_arguments : Flag<["-"], "fdebug-pass-arguments">, Group<f_Group>;
def fdebug_pass_structure : Flag<["-"], "fdebug-pass-structure">, Group<f_Group>;
def fdepfile_entry : Joined<["-"], "fdepfile-entry=">,
Group<f_clang_Group>, Flags<[CC1Option]>;
def fdiagnostics_fixit_info : Flag<["-"], "fdiagnostics-fixit-info">, Group<f_clang_Group>;
def fdiagnostics_parseable_fixits : Flag<["-"], "fdiagnostics-parseable-fixits">, Group<f_clang_Group>,
Flags<[CoreOption, CC1Option]>, HelpText<"Print fix-its in machine parseable form">;
def fdiagnostics_print_source_range_info : Flag<["-"], "fdiagnostics-print-source-range-info">,
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Print source range spans in numeric form">;
def fdiagnostics_show_hotness : Flag<["-"], "fdiagnostics-show-hotness">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Enable profile hotness information in diagnostic line">;
def fdiagnostics_hotness_threshold_EQ : Joined<["-"], "fdiagnostics-hotness-threshold=">,
Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<number>">,
HelpText<"Prevent optimization remarks from being output if they do not have at least this profile count">;
def fdiagnostics_show_option : Flag<["-"], "fdiagnostics-show-option">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Print option name with mappable diagnostics">;
def fdiagnostics_show_note_include_stack : Flag<["-"], "fdiagnostics-show-note-include-stack">,
Group<f_Group>, Flags<[CC1Option]>, HelpText<"Display include stacks for diagnostic notes">;
def fdiagnostics_format_EQ : Joined<["-"], "fdiagnostics-format=">, Group<f_clang_Group>;
def fdiagnostics_show_category_EQ : Joined<["-"], "fdiagnostics-show-category=">, Group<f_clang_Group>;
def fdiagnostics_show_template_tree : Flag<["-"], "fdiagnostics-show-template-tree">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Print a template comparison tree for differing templates">;
def fdeclspec : Flag<["-"], "fdeclspec">, Group<f_clang_Group>,
HelpText<"Allow __declspec as a keyword">, Flags<[CC1Option]>;
def fdiscard_value_names : Flag<["-"], "fdiscard-value-names">, Group<f_clang_Group>,
HelpText<"Discard value names in LLVM IR">, Flags<[DriverOption]>;
def fno_discard_value_names : Flag<["-"], "fno-discard-value-names">, Group<f_clang_Group>,
HelpText<"Do not discard value names in LLVM IR">, Flags<[DriverOption]>;
def fdollars_in_identifiers : Flag<["-"], "fdollars-in-identifiers">, Group<f_Group>,
HelpText<"Allow '$' in identifiers">, Flags<[CC1Option]>;
def fdwarf2_cfi_asm : Flag<["-"], "fdwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
def fno_dwarf2_cfi_asm : Flag<["-"], "fno-dwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
def fdwarf_directory_asm : Flag<["-"], "fdwarf-directory-asm">, Group<f_Group>;
def fno_dwarf_directory_asm : Flag<["-"], "fno-dwarf-directory-asm">, Group<f_Group>, Flags<[CC1Option]>;
def felide_constructors : Flag<["-"], "felide-constructors">, Group<f_Group>;
def fno_elide_type : Flag<["-"], "fno-elide-type">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Do not elide types when printing diagnostics">;
def feliminate_unused_debug_symbols : Flag<["-"], "feliminate-unused-debug-symbols">, Group<f_Group>;
def femit_all_decls : Flag<["-"], "femit-all-decls">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Emit all declarations, even if unused">;
def femulated_tls : Flag<["-"], "femulated-tls">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use emutls functions to access thread_local variables">;
def fno_emulated_tls : Flag<["-"], "fno-emulated-tls">, Group<f_Group>, Flags<[CC1Option]>;
def fencoding_EQ : Joined<["-"], "fencoding=">, Group<f_Group>;
def ferror_limit_EQ : Joined<["-"], "ferror-limit=">, Group<f_Group>, Flags<[CoreOption]>;
def fexceptions : Flag<["-"], "fexceptions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable support for exception handling">;
def fdwarf_exceptions : Flag<["-"], "fdwarf-exceptions">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Use DWARF style exceptions">;
def fsjlj_exceptions : Flag<["-"], "fsjlj-exceptions">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Use SjLj style exceptions">;
def fseh_exceptions : Flag<["-"], "fseh-exceptions">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Use SEH style exceptions">;
def fexcess_precision_EQ : Joined<["-"], "fexcess-precision=">,
Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fexpensive-optimizations">, Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fno-expensive-optimizations">, Group<clang_ignored_gcc_optimization_f_Group>;
def fextdirs_EQ : Joined<["-"], "fextdirs=">, Group<f_Group>;
def : Flag<["-"], "fdefer-pop">, Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fno-defer-pop">, Group<clang_ignored_gcc_optimization_f_Group>;
def : Flag<["-"], "fextended-identifiers">, Group<clang_ignored_f_Group>;
def : Flag<["-"], "fno-extended-identifiers">, Group<f_Group>, Flags<[Unsupported]>;
def fhosted : Flag<["-"], "fhosted">, Group<f_Group>;
def fdenormal_fp_math_EQ : Joined<["-"], "fdenormal-fp-math=">, Group<f_Group>, Flags<[CC1Option]>;
def ffast_math : Flag<["-"], "ffast-math">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allow aggressive, lossy floating-point optimizations">;
def fno_fast_math : Flag<["-"], "fno-fast-math">, Group<f_Group>;
def fmath_errno : Flag<["-"], "fmath-errno">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Require math functions to indicate errors by setting errno">;
def fno_math_errno : Flag<["-"], "fno-math-errno">, Group<f_Group>;
def fbracket_depth_EQ : Joined<["-"], "fbracket-depth=">, Group<f_Group>;
def fsignaling_math : Flag<["-"], "fsignaling-math">, Group<f_Group>;
def fno_signaling_math : Flag<["-"], "fno-signaling-math">, Group<f_Group>;
def fjump_tables : Flag<["-"], "fjump-tables">, Group<f_Group>;
def fno_jump_tables : Flag<["-"], "fno-jump-tables">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not use jump tables for lowering switches">;
def fforce_enable_int128 : Flag<["-"], "fforce-enable-int128">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable support for int128_t type">;
def fno_force_enable_int128 : Flag<["-"], "fno-force-enable-int128">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Disable support for int128_t type">;
def ffixed_point : Flag<["-"], "ffixed-point">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Enable fixed point types">;
def fno_fixed_point : Flag<["-"], "fno-fixed-point">, Group<f_Group>,
HelpText<"Disable fixed point types">;
// Begin sanitizer flags. These should all be core options exposed in all driver
// modes.
let Flags = [CC1Option, CoreOption] in {
def fsanitize_EQ : CommaJoined<["-"], "fsanitize=">, Group<f_clang_Group>,
MetaVarName<"<check>">,
HelpText<"Turn on runtime checks for various forms of undefined "
"or suspicious behavior. See user manual for available checks">;
def fno_sanitize_EQ : CommaJoined<["-"], "fno-sanitize=">, Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>;
def fsanitize_blacklist : Joined<["-"], "fsanitize-blacklist=">,
Group<f_clang_Group>,
HelpText<"Path to blacklist file for sanitizers">;
def fno_sanitize_blacklist : Flag<["-"], "fno-sanitize-blacklist">,
Group<f_clang_Group>,
HelpText<"Don't use blacklist file for sanitizers">;
def fsanitize_coverage
: CommaJoined<["-"], "fsanitize-coverage=">,
Group<f_clang_Group>,
HelpText<"Specify the type of coverage instrumentation for Sanitizers">;
def fno_sanitize_coverage
: CommaJoined<["-"], "fno-sanitize-coverage=">,
Group<f_clang_Group>, Flags<[CoreOption, DriverOption]>,
HelpText<"Disable specified features of coverage instrumentation for "
"Sanitizers">, Values<"func,bb,edge,indirect-calls,trace-bb,trace-cmp,trace-div,trace-gep,8bit-counters,trace-pc,trace-pc-guard,no-prune,inline-8bit-counters">;
def fsanitize_memory_track_origins_EQ : Joined<["-"], "fsanitize-memory-track-origins=">,
Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">;
def fsanitize_memory_track_origins : Flag<["-"], "fsanitize-memory-track-origins">,
Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">;
def fno_sanitize_memory_track_origins : Flag<["-"], "fno-sanitize-memory-track-origins">,
Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable origins tracking in MemorySanitizer">;
def fsanitize_memory_use_after_dtor : Flag<["-"], "fsanitize-memory-use-after-dtor">,
Group<f_clang_Group>,
HelpText<"Enable use-after-destroy detection in MemorySanitizer">;
def fno_sanitize_memory_use_after_dtor : Flag<["-"], "fno-sanitize-memory-use-after-dtor">,
Group<f_clang_Group>,
HelpText<"Disable use-after-destroy detection in MemorySanitizer">;
def fsanitize_address_field_padding : Joined<["-"], "fsanitize-address-field-padding=">,
Group<f_clang_Group>,
HelpText<"Level of field padding for AddressSanitizer">;
def fsanitize_address_use_after_scope : Flag<["-"], "fsanitize-address-use-after-scope">,
Group<f_clang_Group>,
HelpText<"Enable use-after-scope detection in AddressSanitizer">;
def fno_sanitize_address_use_after_scope : Flag<["-"], "fno-sanitize-address-use-after-scope">,
Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable use-after-scope detection in AddressSanitizer">;
def fsanitize_address_poison_class_member_array_new_cookie
: Flag<[ "-" ], "fsanitize-address-poison-class-member-array-new-cookie">,
Group<f_clang_Group>,
HelpText<"Enable poisoning array cookies when using class member operator new[] in AddressSanitizer">;
def fno_sanitize_address_poison_class_member_array_new_cookie
: Flag<[ "-" ], "fno-sanitize-address-poison-class-member-array-new-cookie">,
Group<f_clang_Group>,
HelpText<"Disable poisoning array cookies when using class member operator new[] in AddressSanitizer">;
def fsanitize_address_globals_dead_stripping : Flag<["-"], "fsanitize-address-globals-dead-stripping">,
Group<f_clang_Group>,
HelpText<"Enable linker dead stripping of globals in AddressSanitizer">;
def fsanitize_recover : Flag<["-"], "fsanitize-recover">, Group<f_clang_Group>;
def fno_sanitize_recover : Flag<["-"], "fno-sanitize-recover">,
Flags<[CoreOption, DriverOption]>,
Group<f_clang_Group>;
def fsanitize_recover_EQ : CommaJoined<["-"], "fsanitize-recover=">,
Group<f_clang_Group>,
HelpText<"Enable recovery for specified sanitizers">;
def fno_sanitize_recover_EQ
: CommaJoined<["-"], "fno-sanitize-recover=">,
Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable recovery for specified sanitizers">;
def fsanitize_trap_EQ : CommaJoined<["-"], "fsanitize-trap=">, Group<f_clang_Group>,
HelpText<"Enable trapping for specified sanitizers">;
def fno_sanitize_trap_EQ : CommaJoined<["-"], "fno-sanitize-trap=">, Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable trapping for specified sanitizers">;
def fsanitize_undefined_trap_on_error : Flag<["-"], "fsanitize-undefined-trap-on-error">,
Group<f_clang_Group>;
def fno_sanitize_undefined_trap_on_error : Flag<["-"], "fno-sanitize-undefined-trap-on-error">,
Group<f_clang_Group>;
def fsanitize_minimal_runtime : Flag<["-"], "fsanitize-minimal-runtime">,
Group<f_clang_Group>;
def fno_sanitize_minimal_runtime : Flag<["-"], "fno-sanitize-minimal-runtime">,
Group<f_clang_Group>;
def fsanitize_link_cxx_runtime : Flag<["-"], "fsanitize-link-c++-runtime">,
Group<f_clang_Group>;
def fsanitize_cfi_cross_dso : Flag<["-"], "fsanitize-cfi-cross-dso">,
Group<f_clang_Group>,
HelpText<"Enable control flow integrity (CFI) checks for cross-DSO calls.">;
def fno_sanitize_cfi_cross_dso : Flag<["-"], "fno-sanitize-cfi-cross-dso">,
Flags<[CoreOption, DriverOption]>,
Group<f_clang_Group>,
HelpText<"Disable control flow integrity (CFI) checks for cross-DSO calls.">;
def fsanitize_cfi_icall_generalize_pointers : Flag<["-"], "fsanitize-cfi-icall-generalize-pointers">,
Group<f_clang_Group>,
HelpText<"Generalize pointers in CFI indirect call type signature checks">;
def fsanitize_stats : Flag<["-"], "fsanitize-stats">,
Group<f_clang_Group>,
HelpText<"Enable sanitizer statistics gathering.">;
def fno_sanitize_stats : Flag<["-"], "fno-sanitize-stats">,
Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable sanitizer statistics gathering.">;
def fsanitize_thread_memory_access : Flag<["-"], "fsanitize-thread-memory-access">,
Group<f_clang_Group>,
HelpText<"Enable memory access instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_memory_access : Flag<["-"], "fno-sanitize-thread-memory-access">,
Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable memory access instrumentation in ThreadSanitizer">;
def fsanitize_thread_func_entry_exit : Flag<["-"], "fsanitize-thread-func-entry-exit">,
Group<f_clang_Group>,
HelpText<"Enable function entry/exit instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_func_entry_exit : Flag<["-"], "fno-sanitize-thread-func-entry-exit">,
Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable function entry/exit instrumentation in ThreadSanitizer">;
def fsanitize_thread_atomics : Flag<["-"], "fsanitize-thread-atomics">,
Group<f_clang_Group>,
HelpText<"Enable atomic operations instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_atomics : Flag<["-"], "fno-sanitize-thread-atomics">,
Group<f_clang_Group>,
Flags<[CoreOption, DriverOption]>,
HelpText<"Disable atomic operations instrumentation in ThreadSanitizer">;
def fsanitize_undefined_strip_path_components_EQ : Joined<["-"], "fsanitize-undefined-strip-path-components=">,
Group<f_clang_Group>, MetaVarName<"<number>">,
HelpText<"Strip (or keep only, if negative) a given number of path components "
"when emitting check metadata.">;
} // end -f[no-]sanitize* flags
def funsafe_math_optimizations : Flag<["-"], "funsafe-math-optimizations">,
Group<f_Group>;
def fno_unsafe_math_optimizations : Flag<["-"], "fno-unsafe-math-optimizations">,
Group<f_Group>;
def fassociative_math : Flag<["-"], "fassociative-math">, Group<f_Group>;
def fno_associative_math : Flag<["-"], "fno-associative-math">, Group<f_Group>;
def freciprocal_math :
Flag<["-"], "freciprocal-math">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allow division operations to be reassociated">;
def fno_reciprocal_math : Flag<["-"], "fno-reciprocal-math">, Group<f_Group>;
def ffinite_math_only : Flag<["-"], "ffinite-math-only">, Group<f_Group>, Flags<[CC1Option]>;
def fno_finite_math_only : Flag<["-"], "fno-finite-math-only">, Group<f_Group>;
def fsigned_zeros : Flag<["-"], "fsigned-zeros">, Group<f_Group>;
def fno_signed_zeros :
Flag<["-"], "fno-signed-zeros">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allow optimizations that ignore the sign of floating point zeros">;
def fhonor_nans : Flag<["-"], "fhonor-nans">, Group<f_Group>;
def fno_honor_nans : Flag<["-"], "fno-honor-nans">, Group<f_Group>;
def fhonor_infinities : Flag<["-"], "fhonor-infinities">, Group<f_Group>;
def fno_honor_infinities : Flag<["-"], "fno-honor-infinities">, Group<f_Group>;
// This option was originally misspelt "infinites" [sic].
def : Flag<["-"], "fhonor-infinites">, Alias<fhonor_infinities>;
def : Flag<["-"], "fno-honor-infinites">, Alias<fno_honor_infinities>;
def ftrapping_math : Flag<["-"], "ftrapping-math">, Group<f_Group>, Flags<[CC1Option]>;
def fno_trapping_math : Flag<["-"], "fno-trapping-math">, Group<f_Group>, Flags<[CC1Option]>;
def ffp_contract : Joined<["-"], "ffp-contract=">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Form fused FP ops (e.g. FMAs): fast (everywhere)"
" | on (according to FP_CONTRACT pragma, default) | off (never fuse)">, Values<"fast,on,off">;
def fstrict_float_cast_overflow : Flag<["-"],
"fstrict-float-cast-overflow">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Assume that overflowing float-to-int casts are undefined (default)">;
def fno_strict_float_cast_overflow : Flag<["-"],
"fno-strict-float-cast-overflow">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Relax language rules and try to match the behavior of the target's native float-to-int conversion instructions">;
def ffor_scope : Flag<["-"], "ffor-scope">, Group<f_Group>;
def fno_for_scope : Flag<["-"], "fno-for-scope">, Group<f_Group>;
def frewrite_includes : Flag<["-"], "frewrite-includes">, Group<f_Group>,
Flags<[CC1Option]>;
def fno_rewrite_includes : Flag<["-"], "fno-rewrite-includes">, Group<f_Group>;
def frewrite_imports : Flag<["-"], "frewrite-imports">, Group<f_Group>,
Flags<[CC1Option]>;
def fno_rewrite_imports : Flag<["-"], "fno-rewrite-imports">, Group<f_Group>;
def fdelete_null_pointer_checks : Flag<["-"],
"fdelete-null-pointer-checks">, Group<f_Group>,
HelpText<"Treat usage of null pointers as undefined behavior.">;
def fno_delete_null_pointer_checks : Flag<["-"],
"fno-delete-null-pointer-checks">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not treat usage of null pointers as undefined behavior.">;
def frewrite_map_file : Separate<["-"], "frewrite-map-file">,
Group<f_Group>,
Flags<[ DriverOption, CC1Option ]>;
def frewrite_map_file_EQ : Joined<["-"], "frewrite-map-file=">,
Group<f_Group>,
Flags<[DriverOption]>;
def fuse_line_directives : Flag<["-"], "fuse-line-directives">, Group<f_Group>,
Flags<[CC1Option]>;
def fno_use_line_directives : Flag<["-"], "fno-use-line-directives">, Group<f_Group>;
def ffreestanding : Flag<["-"], "ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Assert that the compilation takes place in a freestanding environment">;
def fgnu_keywords : Flag<["-"], "fgnu-keywords">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allow GNU-extension keywords regardless of language standard">;
def fgnu89_inline : Flag<["-"], "fgnu89-inline">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use the gnu89 inline semantics">;
def fno_gnu89_inline : Flag<["-"], "fno-gnu89-inline">, Group<f_Group>;
def fgnu_runtime : Flag<["-"], "fgnu-runtime">, Group<f_Group>,
HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
def fheinous_gnu_extensions : Flag<["-"], "fheinous-gnu-extensions">, Flags<[CC1Option]>;
def filelist : Separate<["-"], "filelist">, Flags<[LinkerInput]>,
Group<Link_Group>;
def : Flag<["-"], "findirect-virtual-calls">, Alias<fapple_kext>;
def finline_functions : Flag<["-"], "finline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Inline suitable functions">;
def finline_hint_functions: Flag<["-"], "finline-hint-functions">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Inline functions which are (explicitly or implicitly) marked inline">;
def finline : Flag<["-"], "finline">, Group<clang_ignored_f_Group>;
def fexperimental_isel : Flag<["-"], "fexperimental-isel">, Group<f_clang_Group>,
HelpText<"Enables the experimental global instruction selector">;
def fexperimental_new_pass_manager : Flag<["-"], "fexperimental-new-pass-manager">,
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Enables an experimental new pass manager in LLVM.">;
def finput_charset_EQ : Joined<["-"], "finput-charset=">, Group<f_Group>;
def fexec_charset_EQ : Joined<["-"], "fexec-charset=">, Group<f_Group>;
def finstrument_functions : Flag<["-"], "finstrument-functions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Generate calls to instrument function entry and exit">;
def finstrument_functions_after_inlining : Flag<["-"], "finstrument-functions-after-inlining">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Like -finstrument-functions, but insert the calls after inlining">;
def finstrument_function_entry_bare : Flag<["-"], "finstrument-function-entry-bare">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Instrument function entry only, after inlining, without arguments to the instrumentation call">;
def fcf_protection_EQ : Joined<["-"], "fcf-protection=">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Instrument control-flow architecture protection. Options: return, branch, full, none.">, Values<"return,branch,full,none">;
def fcf_protection : Flag<["-"], "fcf-protection">, Group<f_Group>, Flags<[CoreOption, CC1Option]>,
Alias<fcf_protection_EQ>, AliasArgs<["full"]>,
HelpText<"Enable cf-protection in 'full' mode">;
def fxray_instrument : Flag<["-"], "fxray-instrument">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Generate XRay instrumentation sleds on function entry and exit">;
def fnoxray_instrument : Flag<["-"], "fno-xray-instrument">, Group<f_Group>,
Flags<[CC1Option]>;
def fxray_instruction_threshold_EQ :
JoinedOrSeparate<["-"], "fxray-instruction-threshold=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Sets the minimum function size to instrument with XRay">;
def fxray_instruction_threshold_ :
JoinedOrSeparate<["-"], "fxray-instruction-threshold">,
Group<f_Group>, Flags<[CC1Option]>;
def fxray_always_instrument :
JoinedOrSeparate<["-"], "fxray-always-instrument=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"DEPRECATED: Filename defining the whitelist for imbuing the 'always instrument' XRay attribute.">;
def fxray_never_instrument :
JoinedOrSeparate<["-"], "fxray-never-instrument=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"DEPRECATED: Filename defining the whitelist for imbuing the 'never instrument' XRay attribute.">;
def fxray_attr_list :
JoinedOrSeparate<["-"], "fxray-attr-list=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Filename defining the list of functions/types for imbuing XRay attributes.">;
def fxray_modes :
JoinedOrSeparate<["-"], "fxray-modes=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"List of modes to link in by default into XRay instrumented binaries.">;
def fxray_always_emit_customevents : Flag<["-"], "fxray-always-emit-customevents">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Determine whether to always emit __xray_customevent(...) calls even if the function it appears in is not always instrumented.">;
def fnoxray_always_emit_customevents : Flag<["-"], "fno-xray-always-emit-customevents">, Group<f_Group>,
Flags<[CC1Option]>;
def fxray_always_emit_typedevents : Flag<["-"], "fxray-always-emit-typedevents">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Determine whether to always emit __xray_typedevent(...) calls even if the function it appears in is not always instrumented.">;
def fnoxray_always_emit_typedevents : Flag<["-"], "fno-xray-always-emit-typedevents">, Group<f_Group>,
Flags<[CC1Option]>;
def fxray_link_deps : Flag<["-"], "fxray-link-deps">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Tells clang to add the link dependencies for XRay.">;
def fnoxray_link_deps : Flag<["-"], "fnoxray-link-deps">, Group<f_Group>,
Flags<[CC1Option]>;
def fxray_instrumentation_bundle :
JoinedOrSeparate<["-"], "fxray-instrumentation-bundle=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Select which XRay instrumentation points to emit. Options: all, none, function, custom. Default is 'all'.">;
def ffine_grained_bitfield_accesses : Flag<["-"],
"ffine-grained-bitfield-accesses">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Use separate accesses for consecutive bitfield runs with legal widths and alignments.">;
def fno_fine_grained_bitfield_accesses : Flag<["-"],
"fno-fine-grained-bitfield-accesses">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Use large-integer access for consecutive bitfield runs.">;
def flat__namespace : Flag<["-"], "flat_namespace">;
def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Group>;
def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group<f_Group>;
def flto_EQ : Joined<["-"], "flto=">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Set LTO mode to either 'full' or 'thin'">, Values<"thin,full">;
def flto : Flag<["-"], "flto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Enable LTO in 'full' mode">;
def fno_lto : Flag<["-"], "fno-lto">, Group<f_Group>,
HelpText<"Disable LTO mode (default)">;
def flto_jobs_EQ : Joined<["-"], "flto-jobs=">,
Flags<[CC1Option]>, Group<f_Group>,
HelpText<"Controls the backend parallelism of -flto=thin (default "
"of 0 means the number of threads will be derived from "
"the number of CPUs detected)">;
def fthinlto_index_EQ : Joined<["-"], "fthinlto-index=">,
Flags<[CC1Option]>, Group<f_Group>,
HelpText<"Perform ThinLTO importing using provided function summary index">;
def fmacro_backtrace_limit_EQ : Joined<["-"], "fmacro-backtrace-limit=">,
Group<f_Group>, Flags<[DriverOption, CoreOption]>;
def fmerge_all_constants : Flag<["-"], "fmerge-all-constants">, Group<f_Group>,
Flags<[CC1Option, CoreOption]>, HelpText<"Allow merging of constants">;
def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, Group<f_Group>;
def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Enable full Microsoft Visual C++ compatibility">;
def fms_volatile : Joined<["-"], "fms-volatile">, Group<f_Group>, Flags<[CC1Option]>;
def fmsc_version : Joined<["-"], "fmsc-version=">, Group<f_Group>, Flags<[DriverOption, CoreOption]>,
HelpText<"Microsoft compiler version number to report in _MSC_VER (0 = don't define it (default))">;
def fms_compatibility_version
: Joined<["-"], "fms-compatibility-version=">,
Group<f_Group>,
Flags<[ CC1Option, CoreOption ]>,
HelpText<"Dot-separated value representing the Microsoft compiler "
"version number to report in _MSC_VER (0 = don't define it "
"(default))">;
def fdelayed_template_parsing : Flag<["-"], "fdelayed-template-parsing">, Group<f_Group>,
HelpText<"Parse templated function definitions at the end of the "
"translation unit">, Flags<[CC1Option, CoreOption]>;
def fms_memptr_rep_EQ : Joined<["-"], "fms-memptr-rep=">, Group<f_Group>, Flags<[CC1Option]>;
def fmodules_cache_path : Joined<["-"], "fmodules-cache-path=">, Group<i_Group>,
Flags<[DriverOption, CC1Option]>, MetaVarName<"<directory>">,
HelpText<"Specify the module cache path">;
def fmodules_user_build_path : Separate<["-"], "fmodules-user-build-path">, Group<i_Group>,
Flags<[DriverOption, CC1Option]>, MetaVarName<"<directory>">,
HelpText<"Specify the module user build path">;
def fprebuilt_module_path : Joined<["-"], "fprebuilt-module-path=">, Group<i_Group>,
Flags<[DriverOption, CC1Option]>, MetaVarName<"<directory>">,
HelpText<"Specify the prebuilt module path">;
def fmodules_prune_interval : Joined<["-"], "fmodules-prune-interval=">, Group<i_Group>,
Flags<[CC1Option]>, MetaVarName<"<seconds>">,
HelpText<"Specify the interval (in seconds) between attempts to prune the module cache">;
def fmodules_prune_after : Joined<["-"], "fmodules-prune-after=">, Group<i_Group>,
Flags<[CC1Option]>, MetaVarName<"<seconds>">,
HelpText<"Specify the interval (in seconds) after which a module file will be considered unused">;
def fmodules_search_all : Flag <["-"], "fmodules-search-all">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
HelpText<"Search even non-imported modules to resolve references">;
def fbuild_session_timestamp : Joined<["-"], "fbuild-session-timestamp=">,
Group<i_Group>, Flags<[CC1Option]>, MetaVarName<"<time since Epoch in seconds>">,
HelpText<"Time when the current build session started">;
def fbuild_session_file : Joined<["-"], "fbuild-session-file=">,
Group<i_Group>, MetaVarName<"<file>">,
HelpText<"Use the last modification time of <file> as the build session timestamp">;
def fmodules_validate_once_per_build_session : Flag<["-"], "fmodules-validate-once-per-build-session">,
Group<i_Group>, Flags<[CC1Option]>,
HelpText<"Don't verify input files for the modules if the module has been "
"successfully validated or loaded during this build session">;
def fmodules_disable_diagnostic_validation : Flag<["-"], "fmodules-disable-diagnostic-validation">,
Group<i_Group>, Flags<[CC1Option]>,
HelpText<"Disable validation of the diagnostic options when loading the module">;
def fmodules_validate_system_headers : Flag<["-"], "fmodules-validate-system-headers">,
Group<i_Group>, Flags<[CC1Option]>,
HelpText<"Validate the system headers that a module depends on when loading the module">;
def fno_modules_validate_system_headers : Flag<["-"], "fno-modules-validate-system-headers">,
Group<i_Group>, Flags<[DriverOption]>;
def fmodules : Flag <["-"], "fmodules">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
HelpText<"Enable the 'modules' language feature">;
def fimplicit_module_maps : Flag <["-"], "fimplicit-module-maps">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
HelpText<"Implicitly search the file system for module map files.">;
def fmodules_ts : Flag <["-"], "fmodules-ts">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Enable support for the C++ Modules TS">;
def fmodule_maps : Flag <["-"], "fmodule-maps">, Alias<fimplicit_module_maps>;
def fmodule_name_EQ : Joined<["-"], "fmodule-name=">, Group<f_Group>,
Flags<[DriverOption,CC1Option]>, MetaVarName<"<name>">,
HelpText<"Specify the name of the module to build">;
def fmodule_name : Separate<["-"], "fmodule-name">, Alias<fmodule_name_EQ>;
def fmodule_implementation_of : Separate<["-"], "fmodule-implementation-of">,
Flags<[CC1Option]>, Alias<fmodule_name_EQ>;
def fmodule_map_file : Joined<["-"], "fmodule-map-file=">,
Group<f_Group>, Flags<[DriverOption,CC1Option]>, MetaVarName<"<file>">,
HelpText<"Load this module map file">;
def fmodule_file : Joined<["-"], "fmodule-file=">,
Group<i_Group>, Flags<[DriverOption,CC1Option]>, MetaVarName<"[<name>=]<file>">,
HelpText<"Specify the mapping of module name to precompiled module file, or load a module file if name is omitted.">;
def fmodules_ignore_macro : Joined<["-"], "fmodules-ignore-macro=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Ignore the definition of the given macro when building and loading modules">;
def fmodules_decluse : Flag <["-"], "fmodules-decluse">, Group<f_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Require declaration of modules used within a module">;
def fmodules_strict_decluse : Flag <["-"], "fmodules-strict-decluse">, Group<f_Group>,
Flags<[DriverOption,CC1Option]>,
HelpText<"Like -fmodules-decluse but requires all headers to be in modules">;
def fno_modules_search_all : Flag <["-"], "fno-modules-search-all">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>;
def fno_implicit_modules :
Flag <["-"], "fno-implicit-modules">,
Group<f_Group>, Flags<[DriverOption, CC1Option]>;
def fretain_comments_from_system_headers : Flag<["-"], "fretain-comments-from-system-headers">, Group<f_Group>, Flags<[CC1Option]>;
def fmudflapth : Flag<["-"], "fmudflapth">, Group<f_Group>;
def fmudflap : Flag<["-"], "fmudflap">, Group<f_Group>;
def fnested_functions : Flag<["-"], "fnested-functions">, Group<f_Group>;
def fnext_runtime : Flag<["-"], "fnext-runtime">, Group<f_Group>;
def fno_access_control : Flag<["-"], "fno-access-control">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Disable C++ access control">;
def fno_apple_pragma_pack : Flag<["-"], "fno-apple-pragma-pack">, Group<f_Group>;
def fno_asm : Flag<["-"], "fno-asm">, Group<f_Group>;
def fno_asynchronous_unwind_tables : Flag<["-"], "fno-asynchronous-unwind-tables">, Group<f_Group>;
def fno_assume_sane_operator_new : Flag<["-"], "fno-assume-sane-operator-new">, Group<f_Group>,
HelpText<"Don't assume that C++'s global operator new can't alias any pointer">,
Flags<[CC1Option]>;
def fno_blocks : Flag<["-"], "fno-blocks">, Group<f_Group>, Flags<[CoreOption]>;
def fno_borland_extensions : Flag<["-"], "fno-borland-extensions">, Group<f_Group>;
def fno_builtin : Flag<["-"], "fno-builtin">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable implicit builtin knowledge of functions">;
def fno_builtin_ : Joined<["-"], "fno-builtin-">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable implicit builtin knowledge of a specific function">;
def fno_caret_diagnostics : Flag<["-"], "fno-caret-diagnostics">, Group<f_Group>,
Flags<[CC1Option]>;
def fno_color_diagnostics : Flag<["-"], "fno-color-diagnostics">, Group<f_Group>,
Flags<[CoreOption, CC1Option]>;
def fno_diagnostics_color : Flag<["-"], "fno-diagnostics-color">, Group<f_Group>,
Flags<[CoreOption, DriverOption]>;
def fno_common : Flag<["-"], "fno-common">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Compile common globals like normal definitions">;
def fno_constant_cfstrings : Flag<["-"], "fno-constant-cfstrings">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Disable creation of CodeFoundation-type constant strings">;
def fno_cxx_exceptions: Flag<["-"], "fno-cxx-exceptions">, Group<f_Group>;
def fno_cxx_modules : Flag <["-"], "fno-cxx-modules">, Group<f_Group>,
Flags<[DriverOption]>;
def fno_diagnostics_fixit_info : Flag<["-"], "fno-diagnostics-fixit-info">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not include fixit information in diagnostics">;
def fno_diagnostics_show_hotness : Flag<["-"], "fno-diagnostics-show-hotness">, Group<f_Group>;
def fno_diagnostics_show_option : Flag<["-"], "fno-diagnostics-show-option">, Group<f_Group>;
def fno_diagnostics_show_note_include_stack : Flag<["-"], "fno-diagnostics-show-note-include-stack">,
Flags<[CC1Option]>, Group<f_Group>;
def fdigraphs : Flag<["-"], "fdigraphs">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:' (default)">;
def fno_digraphs : Flag<["-"], "fno-digraphs">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Disallow alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:'">;
def fno_declspec : Flag<["-"], "fno-declspec">, Group<f_clang_Group>,
HelpText<"Disallow __declspec as a keyword">, Flags<[CC1Option]>;
def fno_dollars_in_identifiers : Flag<["-"], "fno-dollars-in-identifiers">, Group<f_Group>,
HelpText<"Disallow '$' in identifiers">, Flags<[CC1Option]>;
def fno_elide_constructors : Flag<["-"], "fno-elide-constructors">, Group<f_Group>,
HelpText<"Disable C++ copy constructor elision">, Flags<[CC1Option]>;
def fno_eliminate_unused_debug_symbols : Flag<["-"], "fno-eliminate-unused-debug-symbols">, Group<f_Group>;
def fno_exceptions : Flag<["-"], "fno-exceptions">, Group<f_Group>;
def fno_gnu_keywords : Flag<["-"], "fno-gnu-keywords">, Group<f_Group>, Flags<[CC1Option]>;
def fno_inline_functions : Flag<["-"], "fno-inline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>;
def fno_inline : Flag<["-"], "fno-inline">, Group<f_clang_Group>, Flags<[CC1Option]>;
def fno_experimental_isel : Flag<["-"], "fno-experimental-isel">, Group<f_clang_Group>,
HelpText<"Disables the experimental global instruction selector">;
def fno_experimental_new_pass_manager : Flag<["-"], "fno-experimental-new-pass-manager">,
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Disables an experimental new pass manager in LLVM.">;
def fveclib : Joined<["-"], "fveclib=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use the given vector functions library">, Values<"Accelerate,SVML,none">;
def fno_lax_vector_conversions : Flag<["-"], "fno-lax-vector-conversions">, Group<f_Group>,
HelpText<"Disallow implicit conversions between vectors with a different number of elements or different element types">, Flags<[CC1Option]>;
def fno_merge_all_constants : Flag<["-"], "fno-merge-all-constants">, Group<f_Group>,
HelpText<"Disallow merging of constants">;
def fno_modules : Flag <["-"], "fno-modules">, Group<f_Group>,
Flags<[DriverOption]>;
def fno_implicit_module_maps : Flag <["-"], "fno-implicit-module-maps">, Group<f_Group>,
Flags<[DriverOption]>;
def fno_module_maps : Flag <["-"], "fno-module-maps">, Alias<fno_implicit_module_maps>;
def fno_modules_decluse : Flag <["-"], "fno-modules-decluse">, Group<f_Group>,
Flags<[DriverOption]>;
def fno_modules_strict_decluse : Flag <["-"], "fno-strict-modules-decluse">, Group<f_Group>,
Flags<[DriverOption]>;
def fimplicit_modules : Flag <["-"], "fimplicit-modules">, Group<f_Group>,
Flags<[DriverOption]>;
def fmodule_file_deps : Flag <["-"], "fmodule-file-deps">, Group<f_Group>,
Flags<[DriverOption]>;
def fno_module_file_deps : Flag <["-"], "fno-module-file-deps">, Group<f_Group>,
Flags<[DriverOption]>;
def fno_ms_extensions : Flag<["-"], "fno-ms-extensions">, Group<f_Group>,
Flags<[CoreOption]>;
def fno_ms_compatibility : Flag<["-"], "fno-ms-compatibility">, Group<f_Group>,
Flags<[CoreOption]>;
def fno_delayed_template_parsing : Flag<["-"], "fno-delayed-template-parsing">, Group<f_Group>,
HelpText<"Disable delayed template parsing">,
Flags<[DriverOption, CoreOption]>;
def fno_objc_exceptions: Flag<["-"], "fno-objc-exceptions">, Group<f_Group>;
def fno_objc_legacy_dispatch : Flag<["-"], "fno-objc-legacy-dispatch">, Group<f_Group>;
def fno_objc_weak : Flag<["-"], "fno-objc-weak">, Group<f_Group>, Flags<[CC1Option]>;
def fno_omit_frame_pointer : Flag<["-"], "fno-omit-frame-pointer">, Group<f_Group>;
def fno_operator_names : Flag<["-"], "fno-operator-names">, Group<f_Group>,
HelpText<"Do not treat C++ operator name keywords as synonyms for operators">,
Flags<[CC1Option]>;
def fno_pascal_strings : Flag<["-"], "fno-pascal-strings">, Group<f_Group>;
def fno_rtti : Flag<["-"], "fno-rtti">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Disable generation of rtti information">;
def fno_rtti_data : Flag<["-"], "fno-rtti-data">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Control emission of RTTI data">;
def fno_short_enums : Flag<["-"], "fno-short-enums">, Group<f_Group>;
def fno_show_column : Flag<["-"], "fno-show-column">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not include column number on diagnostics">;
def fno_show_source_location : Flag<["-"], "fno-show-source-location">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not include source location information with diagnostics">;
def fdiagnostics_absolute_paths : Flag<["-"], "fdiagnostics-absolute-paths">, Group<f_Group>,
Flags<[CC1Option, CoreOption]>, HelpText<"Print absolute paths in diagnostics">;
def fno_spell_checking : Flag<["-"], "fno-spell-checking">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Disable spell-checking">;
def fno_stack_protector : Flag<["-"], "fno-stack-protector">, Group<f_Group>,
HelpText<"Disable the use of stack protectors">;
def fno_strict_aliasing : Flag<["-"], "fno-strict-aliasing">, Group<f_Group>,
Flags<[DriverOption, CoreOption]>;
def fstruct_path_tbaa : Flag<["-"], "fstruct-path-tbaa">, Group<f_Group>;
def fno_struct_path_tbaa : Flag<["-"], "fno-struct-path-tbaa">, Group<f_Group>;
def fno_strict_enums : Flag<["-"], "fno-strict-enums">, Group<f_Group>;
def fno_strict_vtable_pointers: Flag<["-"], "fno-strict-vtable-pointers">,
Group<f_Group>;
def fno_strict_overflow : Flag<["-"], "fno-strict-overflow">, Group<f_Group>;
def fno_threadsafe_statics : Flag<["-"], "fno-threadsafe-statics">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Do not emit code to make initialization of local statics thread safe">;
def fno_use_cxa_atexit : Flag<["-"], "fno-use-cxa-atexit">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Don't use __cxa_atexit for calling destructors">;
def fno_register_global_dtors_with_atexit : Flag<["-"], "fno-register-global-dtors-with-atexit">, Group<f_Group>,
HelpText<"Don't use atexit or __cxa_atexit to register global destructors">;
def fno_use_init_array : Flag<["-"], "fno-use-init-array">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Don't use .init_array instead of .ctors">;
def fno_unit_at_a_time : Flag<["-"], "fno-unit-at-a-time">, Group<f_Group>;
def fno_unwind_tables : Flag<["-"], "fno-unwind-tables">, Group<f_Group>;
def fno_verbose_asm : Flag<["-"], "fno-verbose-asm">, Group<f_Group>;
def fno_working_directory : Flag<["-"], "fno-working-directory">, Group<f_Group>;
def fno_wrapv : Flag<["-"], "fno-wrapv">, Group<f_Group>;
def fno_zero_initialized_in_bss : Flag<["-"], "fno-zero-initialized-in-bss">, Group<f_Group>;
def fobjc_arc : Flag<["-"], "fobjc-arc">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Synthesize retain and release calls for Objective-C pointers">;
def fno_objc_arc : Flag<["-"], "fno-objc-arc">, Group<f_Group>;
def fobjc_arc_exceptions : Flag<["-"], "fobjc-arc-exceptions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use EH-safe code when synthesizing retains and releases in -fobjc-arc">;
def fno_objc_arc_exceptions : Flag<["-"], "fno-objc-arc-exceptions">, Group<f_Group>;
def fobjc_atdefs : Flag<["-"], "fobjc-atdefs">, Group<clang_ignored_f_Group>;
def fobjc_call_cxx_cdtors : Flag<["-"], "fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
def fobjc_exceptions: Flag<["-"], "fobjc-exceptions">, Group<f_Group>,
HelpText<"Enable Objective-C exceptions">, Flags<[CC1Option]>;
def fapplication_extension : Flag<["-"], "fapplication-extension">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Restrict code to those available for App Extensions">;
def fno_application_extension : Flag<["-"], "fno-application-extension">,
Group<f_Group>;
def frelaxed_template_template_args : Flag<["-"], "frelaxed-template-template-args">,
Flags<[CC1Option]>, HelpText<"Enable C++17 relaxed template template argument matching">,
Group<f_Group>;
def fno_relaxed_template_template_args : Flag<["-"], "fno-relaxed-template-template-args">,
Group<f_Group>;
def fsized_deallocation : Flag<["-"], "fsized-deallocation">, Flags<[CC1Option]>,
HelpText<"Enable C++14 sized global deallocation functions">, Group<f_Group>;
def fno_sized_deallocation: Flag<["-"], "fno-sized-deallocation">, Group<f_Group>;
def faligned_allocation : Flag<["-"], "faligned-allocation">, Flags<[CC1Option]>,
HelpText<"Enable C++17 aligned allocation functions">, Group<f_Group>;
def fno_aligned_allocation: Flag<["-"], "fno-aligned-allocation">,
Group<f_Group>, Flags<[CC1Option]>;
def fnew_alignment_EQ : Joined<["-"], "fnew-alignment=">,
HelpText<"Specifies the largest alignment guaranteed by '::operator new(size_t)'">,
MetaVarName<"<align>">, Group<f_Group>, Flags<[CC1Option]>;
def : Separate<["-"], "fnew-alignment">, Alias<fnew_alignment_EQ>;
def : Flag<["-"], "faligned-new">, Alias<faligned_allocation>;
def : Flag<["-"], "fno-aligned-new">, Alias<fno_aligned_allocation>;
def faligned_new_EQ : Joined<["-"], "faligned-new=">;
def fobjc_legacy_dispatch : Flag<["-"], "fobjc-legacy-dispatch">, Group<f_Group>;
def fobjc_new_property : Flag<["-"], "fobjc-new-property">, Group<clang_ignored_f_Group>;
def fobjc_infer_related_result_type : Flag<["-"], "fobjc-infer-related-result-type">,
Group<f_Group>;
def fno_objc_infer_related_result_type : Flag<["-"],
"fno-objc-infer-related-result-type">, Group<f_Group>,
HelpText<
"do not infer Objective-C related result type based on method family">,
Flags<[CC1Option]>;
def fobjc_link_runtime: Flag<["-"], "fobjc-link-runtime">, Group<f_Group>;
def fobjc_weak : Flag<["-"], "fobjc-weak">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable ARC-style weak references in Objective-C">;
// Objective-C ABI options.
def fobjc_runtime_EQ : Joined<["-"], "fobjc-runtime=">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Specify the target Objective-C runtime kind and version">;
def fobjc_abi_version_EQ : Joined<["-"], "fobjc-abi-version=">, Group<f_Group>;
def fobjc_nonfragile_abi_version_EQ : Joined<["-"], "fobjc-nonfragile-abi-version=">, Group<f_Group>;
def fobjc_nonfragile_abi : Flag<["-"], "fobjc-nonfragile-abi">, Group<f_Group>;
def fno_objc_nonfragile_abi : Flag<["-"], "fno-objc-nonfragile-abi">, Group<f_Group>;
def fobjc_sender_dependent_dispatch : Flag<["-"], "fobjc-sender-dependent-dispatch">, Group<f_Group>;
def fomit_frame_pointer : Flag<["-"], "fomit-frame-pointer">, Group<f_Group>;
def fopenmp : Flag<["-"], "fopenmp">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>,
HelpText<"Parse OpenMP pragmas and generate parallel code.">;
def fno_openmp : Flag<["-"], "fno-openmp">, Group<f_Group>, Flags<[NoArgumentUnused]>;
def fopenmp_version_EQ : Joined<["-"], "fopenmp-version=">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>;
def fopenmp_EQ : Joined<["-"], "fopenmp=">, Group<f_Group>;
def fopenmp_use_tls : Flag<["-"], "fopenmp-use-tls">, Group<f_Group>,
Flags<[NoArgumentUnused, HelpHidden]>;
def fnoopenmp_use_tls : Flag<["-"], "fnoopenmp-use-tls">, Group<f_Group>,
Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
def fopenmp_targets_EQ : CommaJoined<["-"], "fopenmp-targets=">, Flags<[DriverOption, CC1Option]>,
HelpText<"Specify comma-separated list of triples OpenMP offloading targets to be supported">;
def fopenmp_dump_offload_linker_script : Flag<["-"], "fopenmp-dump-offload-linker-script">,
Group<f_Group>, Flags<[NoArgumentUnused, HelpHidden]>;
def fopenmp_relocatable_target : Flag<["-"], "fopenmp-relocatable-target">,
Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
def fnoopenmp_relocatable_target : Flag<["-"], "fnoopenmp-relocatable-target">,
Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
def fopenmp_simd : Flag<["-"], "fopenmp-simd">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>,
HelpText<"Emit OpenMP code only for SIMD-based constructs.">;
def fno_openmp_simd : Flag<["-"], "fno-openmp-simd">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>;
def fopenmp_cuda_mode : Flag<["-"], "fopenmp-cuda-mode">, Group<f_Group>,
Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
def fno_openmp_cuda_mode : Flag<["-"], "fno-openmp-cuda-mode">, Group<f_Group>,
Flags<[NoArgumentUnused, HelpHidden]>;
def fno_optimize_sibling_calls : Flag<["-"], "fno-optimize-sibling-calls">, Group<f_Group>;
def foptimize_sibling_calls : Flag<["-"], "foptimize-sibling-calls">, Group<f_Group>;
def fno_escaping_block_tail_calls : Flag<["-"], "fno-escaping-block-tail-calls">, Group<f_Group>, Flags<[CC1Option]>;
def fescaping_block_tail_calls : Flag<["-"], "fescaping-block-tail-calls">, Group<f_Group>;
def force__cpusubtype__ALL : Flag<["-"], "force_cpusubtype_ALL">;
def force__flat__namespace : Flag<["-"], "force_flat_namespace">;
def force__load : Separate<["-"], "force_load">;
def force_addr : Joined<["-"], "fforce-addr">, Group<clang_ignored_f_Group>;
def foutput_class_dir_EQ : Joined<["-"], "foutput-class-dir=">, Group<f_Group>;
def fpack_struct : Flag<["-"], "fpack-struct">, Group<f_Group>;
def fno_pack_struct : Flag<["-"], "fno-pack-struct">, Group<f_Group>;
def fpack_struct_EQ : Joined<["-"], "fpack-struct=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Specify the default maximum struct packing alignment">;
def fmax_type_align_EQ : Joined<["-"], "fmax-type-align=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Specify the maximum alignment to enforce on pointers lacking an explicit alignment">;
def fno_max_type_align : Flag<["-"], "fno-max-type-align">, Group<f_Group>;
def fpascal_strings : Flag<["-"], "fpascal-strings">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Recognize and construct Pascal-style string literals">;
def fpcc_struct_return : Flag<["-"], "fpcc-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Override the default ABI to return all structs on the stack">;
def fpch_preprocess : Flag<["-"], "fpch-preprocess">, Group<f_Group>;
def fpic : Flag<["-"], "fpic">, Group<f_Group>;
def fno_pic : Flag<["-"], "fno-pic">, Group<f_Group>;
def fpie : Flag<["-"], "fpie">, Group<f_Group>;
def fno_pie : Flag<["-"], "fno-pie">, Group<f_Group>;
def fplt : Flag<["-"], "fplt">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use the PLT to make function calls">;
def fno_plt : Flag<["-"], "fno-plt">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not use the PLT to make function calls">;
def fropi : Flag<["-"], "fropi">, Group<f_Group>;
def fno_ropi : Flag<["-"], "fno-ropi">, Group<f_Group>;
def frwpi : Flag<["-"], "frwpi">, Group<f_Group>;
def fno_rwpi : Flag<["-"], "fno-rwpi">, Group<f_Group>;
def fplugin_EQ : Joined<["-"], "fplugin=">, Group<f_Group>, Flags<[DriverOption]>, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
def fpreserve_as_comments : Flag<["-"], "fpreserve-as-comments">, Group<f_Group>;
def fno_preserve_as_comments : Flag<["-"], "fno-preserve-as-comments">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not preserve comments in inline assembly">;
def fprofile_arcs : Flag<["-"], "fprofile-arcs">, Group<f_Group>;
def fno_profile_arcs : Flag<["-"], "fno-profile-arcs">, Group<f_Group>;
def framework : Separate<["-"], "framework">, Flags<[LinkerInput]>;
def frandom_seed_EQ : Joined<["-"], "frandom-seed=">, Group<clang_ignored_f_Group>;
def freg_struct_return : Flag<["-"], "freg-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Override the default ABI to return small structs in registers">;
def frtti : Flag<["-"], "frtti">, Group<f_Group>;
def : Flag<["-"], "fsched-interblock">, Group<clang_ignored_f_Group>;
def fshort_enums : Flag<["-"], "fshort-enums">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Allocate to an enum type only as many bytes as it needs for the declared range of possible values">;
def fchar8__t : Flag<["-"], "fchar8_t">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable C++ builtin type char8_t">;
def fno_char8__t : Flag<["-"], "fno-char8_t">, Group<f_Group>,
HelpText<"Disable C++ builtin type char8_t">;
def fshort_wchar : Flag<["-"], "fshort-wchar">, Group<f_Group>,
HelpText<"Force wchar_t to be a short unsigned int">;
def fno_short_wchar : Flag<["-"], "fno-short-wchar">, Group<f_Group>,
HelpText<"Force wchar_t to be an unsigned int">;
def fshow_overloads_EQ : Joined<["-"], "fshow-overloads=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Which overload candidates to show when overload resolution fails: "
"best|all; defaults to all">, Values<"best,all">;
def fshow_column : Flag<["-"], "fshow-column">, Group<f_Group>, Flags<[CC1Option]>;
def fshow_source_location : Flag<["-"], "fshow-source-location">, Group<f_Group>;
def fspell_checking : Flag<["-"], "fspell-checking">, Group<f_Group>;
def fspell_checking_limit_EQ : Joined<["-"], "fspell-checking-limit=">, Group<f_Group>;
def fsigned_bitfields : Flag<["-"], "fsigned-bitfields">, Group<f_Group>;
def fsigned_char : Flag<["-"], "fsigned-char">, Group<f_Group>;
def fno_signed_char : Flag<["-"], "fno-signed-char">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Char is unsigned">;
def fsplit_stack : Flag<["-"], "fsplit-stack">, Group<f_Group>;
def fstack_protector_all : Flag<["-"], "fstack-protector-all">, Group<f_Group>,
HelpText<"Force the usage of stack protectors for all functions">;
def fstack_protector_strong : Flag<["-"], "fstack-protector-strong">, Group<f_Group>,
HelpText<"Use a strong heuristic to apply stack protectors to functions">;
def fstack_protector : Flag<["-"], "fstack-protector">, Group<f_Group>,
HelpText<"Enable stack protectors for functions potentially vulnerable to stack smashing">;
def fstandalone_debug : Flag<["-"], "fstandalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Emit full debug info for all types used by the program">;
def fno_standalone_debug : Flag<["-"], "fno-standalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Limit debug information produced to reduce size of debug binary">;
def flimit_debug_info : Flag<["-"], "flimit-debug-info">, Flags<[CoreOption]>, Alias<fno_standalone_debug>;
def fno_limit_debug_info : Flag<["-"], "fno-limit-debug-info">, Flags<[CoreOption]>, Alias<fstandalone_debug>;
def fdebug_macro : Flag<["-"], "fdebug-macro">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Emit macro debug information">;
def fno_debug_macro : Flag<["-"], "fno-debug-macro">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Do not emit macro debug information">;
def fstrict_aliasing : Flag<["-"], "fstrict-aliasing">, Group<f_Group>,
Flags<[DriverOption, CoreOption]>;
def fstrict_enums : Flag<["-"], "fstrict-enums">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable optimizations based on the strict definition of an enum's "
"value range">;
def fstrict_vtable_pointers: Flag<["-"], "fstrict-vtable-pointers">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable optimizations based on the strict rules for overwriting "
"polymorphic C++ objects">;
def fstrict_overflow : Flag<["-"], "fstrict-overflow">, Group<f_Group>;
def fsyntax_only : Flag<["-"], "fsyntax-only">,
Flags<[DriverOption,CoreOption,CC1Option]>, Group<Action_Group>;
def ftabstop_EQ : Joined<["-"], "ftabstop=">, Group<f_Group>;
def ftemplate_depth_EQ : Joined<["-"], "ftemplate-depth=">, Group<f_Group>;
def ftemplate_depth_ : Joined<["-"], "ftemplate-depth-">, Group<f_Group>;
def ftemplate_backtrace_limit_EQ : Joined<["-"], "ftemplate-backtrace-limit=">,
Group<f_Group>;
def foperator_arrow_depth_EQ : Joined<["-"], "foperator-arrow-depth=">,
Group<f_Group>;
def fsave_optimization_record : Flag<["-"], "fsave-optimization-record">,
Group<f_Group>, HelpText<"Generate a YAML optimization record file">;
def fno_save_optimization_record : Flag<["-"], "fno-save-optimization-record">,
Group<f_Group>, Flags<[NoArgumentUnused]>;
def foptimization_record_file_EQ : Joined<["-"], "foptimization-record-file=">,
Group<f_Group>,
HelpText<"Specify the file name of any generated YAML optimization record">;
def ftest_coverage : Flag<["-"], "ftest-coverage">, Group<f_Group>;
def fvectorize : Flag<["-"], "fvectorize">, Group<f_Group>,
HelpText<"Enable the loop vectorization passes">;
def fno_vectorize : Flag<["-"], "fno-vectorize">, Group<f_Group>;
def : Flag<["-"], "ftree-vectorize">, Alias<fvectorize>;
def : Flag<["-"], "fno-tree-vectorize">, Alias<fno_vectorize>;
def fslp_vectorize : Flag<["-"], "fslp-vectorize">, Group<f_Group>,
HelpText<"Enable the superword-level parallelism vectorization passes">;
def fno_slp_vectorize : Flag<["-"], "fno-slp-vectorize">, Group<f_Group>;
def : Flag<["-"], "ftree-slp-vectorize">, Alias<fslp_vectorize>;
def : Flag<["-"], "fno-tree-slp-vectorize">, Alias<fno_slp_vectorize>;
def Wlarge_by_value_copy_def : Flag<["-"], "Wlarge-by-value-copy">,
HelpText<"Warn if a function definition returns or accepts an object larger "
"in bytes than a given value">, Flags<[HelpHidden]>;
def Wlarge_by_value_copy_EQ : Joined<["-"], "Wlarge-by-value-copy=">, Flags<[CC1Option]>;
// These "special" warning flags are effectively processed as f_Group flags by the driver:
// Just silence warnings about -Wlarger-than for now.
def Wlarger_than_EQ : Joined<["-"], "Wlarger-than=">, Group<clang_ignored_f_Group>;
def Wlarger_than_ : Joined<["-"], "Wlarger-than-">, Alias<Wlarger_than_EQ>;
def Wframe_larger_than_EQ : Joined<["-"], "Wframe-larger-than=">, Group<f_Group>, Flags<[DriverOption]>;
def : Flag<["-"], "fterminated-vtables">, Alias<fapple_kext>;
def fthreadsafe_statics : Flag<["-"], "fthreadsafe-statics">, Group<f_Group>;
def ftime_report : Flag<["-"], "ftime-report">, Group<f_Group>, Flags<[CC1Option]>;
def ftlsmodel_EQ : Joined<["-"], "ftls-model=">, Group<f_Group>, Flags<[CC1Option]>;
def ftrapv : Flag<["-"], "ftrapv">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Trap on integer overflow">;
def ftrapv_handler_EQ : Joined<["-"], "ftrapv-handler=">, Group<f_Group>,
MetaVarName<"<function name>">,
HelpText<"Specify the function to be called on overflow">;
def ftrapv_handler : Separate<["-"], "ftrapv-handler">, Group<f_Group>, Flags<[CC1Option]>;
def ftrap_function_EQ : Joined<["-"], "ftrap-function=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Issue call to specified function rather than a trap instruction">;
def funit_at_a_time : Flag<["-"], "funit-at-a-time">, Group<f_Group>;
def funroll_loops : Flag<["-"], "funroll-loops">, Group<f_Group>,
HelpText<"Turn on loop unroller">, Flags<[CC1Option]>;
def fno_unroll_loops : Flag<["-"], "fno-unroll-loops">, Group<f_Group>,
HelpText<"Turn off loop unroller">, Flags<[CC1Option]>;
def freroll_loops : Flag<["-"], "freroll-loops">, Group<f_Group>,
HelpText<"Turn on loop reroller">, Flags<[CC1Option]>;
def fno_reroll_loops : Flag<["-"], "fno-reroll-loops">, Group<f_Group>,
HelpText<"Turn off loop reroller">;
def ftrigraphs : Flag<["-"], "ftrigraphs">, Group<f_Group>,
HelpText<"Process trigraph sequences">, Flags<[CC1Option]>;
def fno_trigraphs : Flag<["-"], "fno-trigraphs">, Group<f_Group>,
HelpText<"Do not process trigraph sequences">, Flags<[CC1Option]>;
def funsigned_bitfields : Flag<["-"], "funsigned-bitfields">, Group<f_Group>;
def funsigned_char : Flag<["-"], "funsigned-char">, Group<f_Group>;
def fno_unsigned_char : Flag<["-"], "fno-unsigned-char">;
def funwind_tables : Flag<["-"], "funwind-tables">, Group<f_Group>;
def fuse_cxa_atexit : Flag<["-"], "fuse-cxa-atexit">, Group<f_Group>;
def fregister_global_dtors_with_atexit : Flag<["-"], "fregister-global-dtors-with-atexit">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use atexit or __cxa_atexit to register global destructors">;
def fuse_init_array : Flag<["-"], "fuse-init-array">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use .init_array instead of .ctors">;
def fno_var_tracking : Flag<["-"], "fno-var-tracking">, Group<clang_ignored_f_Group>;
def fverbose_asm : Flag<["-"], "fverbose-asm">, Group<f_Group>;
def fvisibility_EQ : Joined<["-"], "fvisibility=">, Group<f_Group>,
HelpText<"Set the default symbol visibility for all global declarations">, Values<"hidden,default">;
def fvisibility_inlines_hidden : Flag<["-"], "fvisibility-inlines-hidden">, Group<f_Group>,
HelpText<"Give inline C++ member functions hidden visibility by default">,
Flags<[CC1Option]>;
def fvisibility_ms_compat : Flag<["-"], "fvisibility-ms-compat">, Group<f_Group>,
HelpText<"Give global types 'default' visibility and global functions and "
"variables 'hidden' visibility by default">;
def fwhole_program_vtables : Flag<["-"], "fwhole-program-vtables">, Group<f_Group>,
Flags<[CoreOption, CC1Option]>,
HelpText<"Enables whole-program vtable optimization. Requires -flto">;
def fno_whole_program_vtables : Flag<["-"], "fno-whole-program-vtables">, Group<f_Group>,
Flags<[CoreOption]>;
def fforce_emit_vtables : Flag<["-"], "fforce-emit-vtables">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Emits more virtual tables to improve devirtualization">;
def fno_force_emit_vtables : Flag<["-"], "fno-force-emit-vtables">, Group<f_Group>,
Flags<[CoreOption]>;
def fwrapv : Flag<["-"], "fwrapv">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Treat signed integer overflow as two's complement">;
def fwritable_strings : Flag<["-"], "fwritable-strings">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Store string literals as writable data">;
def fzero_initialized_in_bss : Flag<["-"], "fzero-initialized-in-bss">, Group<f_Group>;
def ffunction_sections : Flag<["-"], "ffunction-sections">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Place each function in its own section (ELF Only)">;
def fno_function_sections : Flag<["-"], "fno-function-sections">,
Group<f_Group>, Flags<[CC1Option]>;
def fdata_sections : Flag <["-"], "fdata-sections">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Place each data in its own section (ELF Only)">;
def fno_data_sections : Flag <["-"], "fno-data-sections">, Group<f_Group>,
Flags<[CC1Option]>;
def fstack_size_section : Flag<["-"], "fstack-size-section">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Emit section containing metadata on function stack sizes">;
def fno_stack_size_section : Flag<["-"], "fno-stack-size-section">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Don't emit section containing metadata on function stack sizes">;
def funique_section_names : Flag <["-"], "funique-section-names">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use unique names for text and data sections (ELF Only)">;
def fno_unique_section_names : Flag <["-"], "fno-unique-section-names">,
Group<f_Group>, Flags<[CC1Option]>;
def fstrict_return : Flag<["-"], "fstrict-return">, Group<f_Group>,
Flags<[CC1Option]>,
HelpText<"Always treat control flow paths that fall off the end of a "
"non-void function as unreachable">;
def fno_strict_return : Flag<["-"], "fno-strict-return">, Group<f_Group>,
Flags<[CC1Option]>;
def fallow_editor_placeholders : Flag<["-"], "fallow-editor-placeholders">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Treat editor placeholders as valid source code">;
def fno_allow_editor_placeholders : Flag<["-"],
"fno-allow-editor-placeholders">, Group<f_Group>;
def fdebug_types_section: Flag <["-"], "fdebug-types-section">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Place debug types in their own section (ELF Only)">;
def fno_debug_types_section: Flag<["-"], "fno-debug-types-section">, Group<f_Group>,
Flags<[CC1Option]>;
def fsplit_dwarf_inlining: Flag <["-"], "fsplit-dwarf-inlining">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Provide minimal debug info in the object/executable to facilitate online symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF">;
def fno_split_dwarf_inlining: Flag<["-"], "fno-split-dwarf-inlining">, Group<f_Group>,
Flags<[CC1Option]>;
def fdebug_prefix_map_EQ
: Joined<["-"], "fdebug-prefix-map=">, Group<f_Group>,
Flags<[CC1Option,CC1AsOption]>,
HelpText<"remap file source paths in debug info">;
def g_Flag : Flag<["-"], "g">, Group<g_Group>,
HelpText<"Generate source-level debug information">;
def gline_tables_only : Flag<["-"], "gline-tables-only">, Group<gN_Group>,
Flags<[CoreOption]>, HelpText<"Emit debug line number tables only">;
def gmlt : Flag<["-"], "gmlt">, Alias<gline_tables_only>;
def g0 : Flag<["-"], "g0">, Group<gN_Group>;
def g1 : Flag<["-"], "g1">, Group<gN_Group>, Alias<gline_tables_only>;
def g2 : Flag<["-"], "g2">, Group<gN_Group>;
def g3 : Flag<["-"], "g3">, Group<gN_Group>;
def ggdb : Flag<["-"], "ggdb">, Group<gTune_Group>;
def ggdb0 : Flag<["-"], "ggdb0">, Group<ggdbN_Group>;
def ggdb1 : Flag<["-"], "ggdb1">, Group<ggdbN_Group>;
def ggdb2 : Flag<["-"], "ggdb2">, Group<ggdbN_Group>;
def ggdb3 : Flag<["-"], "ggdb3">, Group<ggdbN_Group>;
def glldb : Flag<["-"], "glldb">, Group<gTune_Group>;
def gsce : Flag<["-"], "gsce">, Group<gTune_Group>;
def gdwarf_2 : Flag<["-"], "gdwarf-2">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 2">;
def gdwarf_3 : Flag<["-"], "gdwarf-3">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 3">;
def gdwarf_4 : Flag<["-"], "gdwarf-4">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 4">;
def gdwarf_5 : Flag<["-"], "gdwarf-5">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 5">;
def gcodeview : Flag<["-"], "gcodeview">,
HelpText<"Generate CodeView debug information">,
Flags<[CC1Option, CC1AsOption, CoreOption]>;
// Equivalent to our default dwarf version. Forces usual dwarf emission when
// CodeView is enabled.
def gdwarf : Flag<["-"], "gdwarf">, Alias<gdwarf_4>, Flags<[CoreOption]>;
def gfull : Flag<["-"], "gfull">, Group<g_Group>;
def gused : Flag<["-"], "gused">, Group<g_Group>;
def gstabs : Joined<["-"], "gstabs">, Group<g_Group>, Flags<[Unsupported]>;
def gcoff : Joined<["-"], "gcoff">, Group<g_Group>, Flags<[Unsupported]>;
def gxcoff : Joined<["-"], "gxcoff">, Group<g_Group>, Flags<[Unsupported]>;
def gvms : Joined<["-"], "gvms">, Group<g_Group>, Flags<[Unsupported]>;
def gtoggle : Flag<["-"], "gtoggle">, Group<g_flags_Group>, Flags<[Unsupported]>;
def grecord_gcc_switches : Flag<["-"], "grecord-gcc-switches">, Group<g_flags_Group>;
def gno_record_gcc_switches : Flag<["-"], "gno-record-gcc-switches">,
Group<g_flags_Group>;
def gstrict_dwarf : Flag<["-"], "gstrict-dwarf">, Group<g_flags_Group>;
def gno_strict_dwarf : Flag<["-"], "gno-strict-dwarf">, Group<g_flags_Group>;
def gcolumn_info : Flag<["-"], "gcolumn-info">, Group<g_flags_Group>, Flags<[CoreOption]>;
def gno_column_info : Flag<["-"], "gno-column-info">, Group<g_flags_Group>, Flags<[CoreOption]>;
def gsplit_dwarf : Flag<["-"], "gsplit-dwarf">, Group<g_flags_Group>;
def ggnu_pubnames : Flag<["-"], "ggnu-pubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
def gno_gnu_pubnames : Flag<["-"], "gno-gnu-pubnames">, Group<g_flags_Group>, Flags<[CC1Option]>;
def gdwarf_aranges : Flag<["-"], "gdwarf-aranges">, Group<g_flags_Group>;
def gmodules : Flag <["-"], "gmodules">, Group<gN_Group>,
HelpText<"Generate debug info with external references to clang modules"
" or precompiled headers">;
def gz : Flag<["-"], "gz">, Group<g_flags_Group>,
HelpText<"DWARF debug sections compression type">;
def gz_EQ : Joined<["-"], "gz=">, Group<g_flags_Group>,
HelpText<"DWARF debug sections compression type">;
def gembed_source : Flag<["-"], "gembed-source">, Group<g_flags_Group>, Flags<[CC1Option]>,
HelpText<"Embed source text in DWARF debug sections">;
def gno_embed_source : Flag<["-"], "gno-embed-source">, Group<g_flags_Group>,
Flags<[DriverOption]>,
HelpText<"Restore the default behavior of not embedding source text in DWARF debug sections">;
def headerpad__max__install__names : Joined<["-"], "headerpad_max_install_names">;
def help : Flag<["-", "--"], "help">, Flags<[CC1Option,CC1AsOption]>,
HelpText<"Display available options">;
def index_header_map : Flag<["-"], "index-header-map">, Flags<[CC1Option]>,
HelpText<"Make the next included directory (-I or -F) an indexer header map">;
def idirafter : JoinedOrSeparate<["-"], "idirafter">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to AFTER include search path">;
def iframework : JoinedOrSeparate<["-"], "iframework">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to SYSTEM framework search path">;
def iframeworkwithsysroot : JoinedOrSeparate<["-"], "iframeworkwithsysroot">,
Group<clang_i_Group>,
HelpText<"Add directory to SYSTEM framework search path, "
"absolute paths are relative to -isysroot">,
MetaVarName<"<directory>">, Flags<[CC1Option]>;
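// Assumed example of the sysroot-relative behavior described above: with
// "-isysroot /sdk", "-iframeworkwithsysroot /Frameworks" searches
// "/sdk/Frameworks".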
def imacros : JoinedOrSeparate<["-", "--"], "imacros">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Include macros from file before parsing">, MetaVarName<"<file>">;
def image__base : Separate<["-"], "image_base">;
def include_ : JoinedOrSeparate<["-", "--"], "include">, Group<clang_i_Group>, EnumName<"include">,
MetaVarName<"<file>">, HelpText<"Include file before parsing">, Flags<[CC1Option]>;
def include_pch : Separate<["-"], "include-pch">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Include precompiled header file">, MetaVarName<"<file>">;
def relocatable_pch : Flag<["-", "--"], "relocatable-pch">, Flags<[CC1Option]>,
HelpText<"Whether to build a relocatable precompiled header">;
def verify_pch : Flag<["-"], "verify-pch">, Group<Action_Group>, Flags<[CC1Option]>,
HelpText<"Load and verify that a pre-compiled header file is not stale">;
def init : Separate<["-"], "init">;
def install__name : Separate<["-"], "install_name">;
def iprefix : JoinedOrSeparate<["-"], "iprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Set the -iwithprefix/-iwithprefixbefore prefix">, MetaVarName<"<dir>">;
def iquote : JoinedOrSeparate<["-"], "iquote">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to QUOTE include search path">, MetaVarName<"<directory>">;
def isysroot : JoinedOrSeparate<["-"], "isysroot">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Set the system root directory (usually /)">, MetaVarName<"<dir>">;
def isystem : JoinedOrSeparate<["-"], "isystem">, Group<clang_i_Group>,
Flags<[CC1Option]>,
HelpText<"Add directory to SYSTEM include search path">, MetaVarName<"<directory>">;
def isystem_after : JoinedOrSeparate<["-"], "isystem-after">,
Group<clang_i_Group>, Flags<[DriverOption]>, MetaVarName<"<directory>">,
HelpText<"Add directory to end of the SYSTEM include search path">;
def iwithprefixbefore : JoinedOrSeparate<["-"], "iwithprefixbefore">, Group<clang_i_Group>,
HelpText<"Set directory to include search path with prefix">, MetaVarName<"<dir>">,
Flags<[CC1Option]>;
def iwithprefix : JoinedOrSeparate<["-"], "iwithprefix">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Set directory to SYSTEM include search path with prefix">, MetaVarName<"<dir>">;
def iwithsysroot : JoinedOrSeparate<["-"], "iwithsysroot">, Group<clang_i_Group>,
HelpText<"Add directory to SYSTEM include search path, "
"absolute paths are relative to -isysroot">, MetaVarName<"<directory>">,
Flags<[CC1Option]>;
def ivfsoverlay : JoinedOrSeparate<["-"], "ivfsoverlay">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Overlay the virtual filesystem described by file over the real file system">;
def imultilib : Separate<["-"], "imultilib">, Group<gfortran_Group>;
def keep__private__externs : Flag<["-"], "keep_private_externs">;
def l : JoinedOrSeparate<["-"], "l">, Flags<[LinkerInput, RenderJoined]>,
Group<Link_Group>;
def lazy__framework : Separate<["-"], "lazy_framework">, Flags<[LinkerInput]>;
def lazy__library : Separate<["-"], "lazy_library">, Flags<[LinkerInput]>;
def mlittle_endian : Flag<["-"], "mlittle-endian">, Flags<[DriverOption]>;
def EL : Flag<["-"], "EL">, Alias<mlittle_endian>;
def mbig_endian : Flag<["-"], "mbig-endian">, Flags<[DriverOption]>;
def EB : Flag<["-"], "EB">, Alias<mbig_endian>;
def m16 : Flag<["-"], "m16">, Group<m_Group>, Flags<[DriverOption, CoreOption]>;
def m32 : Flag<["-"], "m32">, Group<m_Group>, Flags<[DriverOption, CoreOption]>;
def mqdsp6_compat : Flag<["-"], "mqdsp6-compat">, Group<m_Group>, Flags<[DriverOption,CC1Option]>,
HelpText<"Enable hexagon-qdsp6 backward compatibility">;
def m64 : Flag<["-"], "m64">, Group<m_Group>, Flags<[DriverOption, CoreOption]>;
def mx32 : Flag<["-"], "mx32">, Group<m_Group>, Flags<[DriverOption, CoreOption]>;
def mabi_EQ : Joined<["-"], "mabi=">, Group<m_Group>;
def miamcu : Flag<["-"], "miamcu">, Group<m_Group>, Flags<[DriverOption, CoreOption]>,
HelpText<"Use Intel MCU ABI">;
def mno_iamcu : Flag<["-"], "mno-iamcu">, Group<m_Group>, Flags<[DriverOption, CoreOption]>;
def malign_functions_EQ : Joined<["-"], "malign-functions=">, Group<clang_ignored_m_Group>;
def malign_loops_EQ : Joined<["-"], "malign-loops=">, Group<clang_ignored_m_Group>;
def malign_jumps_EQ : Joined<["-"], "malign-jumps=">, Group<clang_ignored_m_Group>;
def mfancy_math_387 : Flag<["-"], "mfancy-math-387">, Group<clang_ignored_m_Group>;
def mlong_calls : Flag<["-"], "mlong-calls">, Group<m_Group>,
HelpText<"Generate branches with extended addressability, usually via indirect jumps.">;
def mno_long_calls : Flag<["-"], "mno-long-calls">, Group<m_Group>,
HelpText<"Restore the default behaviour of not generating long calls">;
def mexecute_only : Flag<["-"], "mexecute-only">, Group<m_arm_Features_Group>,
HelpText<"Disallow generation of data access to code sections (ARM only)">;
def mno_execute_only : Flag<["-"], "mno-execute-only">, Group<m_arm_Features_Group>,
HelpText<"Allow generation of data access to code sections (ARM only)">;
def mtp_mode_EQ : Joined<["-"], "mtp=">, Group<m_arm_Features_Group>, Values<"soft,cp15">,
HelpText<"Read thread pointer from coprocessor register (ARM only)">;
def mpure_code : Flag<["-"], "mpure-code">, Alias<mexecute_only>; // Alias for GCC compatibility
def mno_pure_code : Flag<["-"], "mno-pure-code">, Alias<mno_execute_only>;
def mtvos_version_min_EQ : Joined<["-"], "mtvos-version-min=">, Group<m_Group>;
def mappletvos_version_min_EQ : Joined<["-"], "mappletvos-version-min=">, Alias<mtvos_version_min_EQ>;
def mtvos_simulator_version_min_EQ : Joined<["-"], "mtvos-simulator-version-min=">;
def mappletvsimulator_version_min_EQ : Joined<["-"], "mappletvsimulator-version-min=">, Alias<mtvos_simulator_version_min_EQ>;
def mwatchos_version_min_EQ : Joined<["-"], "mwatchos-version-min=">, Group<m_Group>;
def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-version-min=">;
def mwatchsimulator_version_min_EQ : Joined<["-"], "mwatchsimulator-version-min=">, Alias<mwatchos_simulator_version_min_EQ>;
def march_EQ : Joined<["-"], "march=">, Group<m_Group>;
def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>, Flags<[DriverOption]>;
def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>;
def mimplicit_it_EQ : Joined<["-"], "mimplicit-it=">, Group<m_Group>;
def mdefault_build_attributes : Joined<["-"], "mdefault-build-attributes">, Group<m_Group>;
def mno_default_build_attributes : Joined<["-"], "mno-default-build-attributes">, Group<m_Group>;
def mconstant_cfstrings : Flag<["-"], "mconstant-cfstrings">, Group<clang_ignored_m_Group>;
def mconsole : Joined<["-"], "mconsole">, Group<m_Group>, Flags<[DriverOption]>;
def mwindows : Joined<["-"], "mwindows">, Group<m_Group>, Flags<[DriverOption]>;
def mdll : Joined<["-"], "mdll">, Group<m_Group>, Flags<[DriverOption]>;
def municode : Joined<["-"], "municode">, Group<m_Group>, Flags<[DriverOption]>;
def mthreads : Joined<["-"], "mthreads">, Group<m_Group>, Flags<[DriverOption]>;
def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>;
def mmcu_EQ : Joined<["-"], "mmcu=">, Group<m_Group>;
def mdynamic_no_pic : Joined<["-"], "mdynamic-no-pic">, Group<m_Group>;
def mfix_and_continue : Flag<["-"], "mfix-and-continue">, Group<clang_ignored_m_Group>;
def mieee_fp : Flag<["-"], "mieee-fp">, Group<clang_ignored_m_Group>;
def minline_all_stringops : Flag<["-"], "minline-all-stringops">, Group<clang_ignored_m_Group>;
def mno_inline_all_stringops : Flag<["-"], "mno-inline-all-stringops">, Group<clang_ignored_m_Group>;
def malign_double : Flag<["-"], "malign-double">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Align doubles to two words in structs (x86 only)">;
def mfloat_abi_EQ : Joined<["-"], "mfloat-abi=">, Group<m_Group>, Values<"soft,softfp,hard">;
def mfpmath_EQ : Joined<["-"], "mfpmath=">, Group<m_Group>;
def mfpu_EQ : Joined<["-"], "mfpu=">, Group<m_Group>;
def mhwdiv_EQ : Joined<["-"], "mhwdiv=">, Group<m_Group>;
def mglobal_merge : Flag<["-"], "mglobal-merge">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Enable merging of globals">;
def mhard_float : Flag<["-"], "mhard-float">, Group<m_Group>;
def miphoneos_version_min_EQ : Joined<["-"], "miphoneos-version-min=">, Group<m_Group>;
def mios_version_min_EQ : Joined<["-"], "mios-version-min=">,
Alias<miphoneos_version_min_EQ>, HelpText<"Set iOS deployment target">;
def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">;
def miphonesimulator_version_min_EQ : Joined<["-"], "miphonesimulator-version-min=">, Alias<mios_simulator_version_min_EQ>;
def mkernel : Flag<["-"], "mkernel">, Group<m_Group>;
def mlinker_version_EQ : Joined<["-"], "mlinker-version=">,
Flags<[DriverOption]>;
def mllvm : Separate<["-"], "mllvm">, Flags<[CC1Option,CC1AsOption,CoreOption]>,
HelpText<"Additional arguments to forward to LLVM's option processing">;
def mmacosx_version_min_EQ : Joined<["-"], "mmacosx-version-min=">,
Group<m_Group>, HelpText<"Set Mac OS X deployment target">;
def mmacos_version_min_EQ : Joined<["-"], "mmacos-version-min=">,
Group<m_Group>, Alias<mmacosx_version_min_EQ>;
def mms_bitfields : Flag<["-"], "mms-bitfields">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard">;
def moutline : Flag<["-"], "moutline">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Enable function outlining (AArch64 only)">;
def mno_outline : Flag<["-"], "mno-outline">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Disable function outlining (AArch64 only)">;
def mno_ms_bitfields : Flag<["-"], "mno-ms-bitfields">, Group<m_Group>,
HelpText<"Do not set the default structure layout to be compatible with the Microsoft compiler standard">;
def mstackrealign : Flag<["-"], "mstackrealign">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Force realign the stack at entry to every function">;
def mstack_alignment : Joined<["-"], "mstack-alignment=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the stack alignment">;
def mstack_probe_size : Joined<["-"], "mstack-probe-size=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the stack probe size">;
def mstack_arg_probe : Flag<["-"], "mstack-arg-probe">, Group<m_Group>,
HelpText<"Enable stack probes">;
def mno_stack_arg_probe : Flag<["-"], "mno-stack-arg-probe">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Disable stack probes which are enabled by default">;
def mthread_model : Separate<["-"], "mthread-model">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"The thread model to use, e.g. posix, single (posix by default)">, Values<"posix,single">;
def meabi : Separate<["-"], "meabi">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set EABI type, e.g. 4, 5 or gnu (default depends on triple)">, Values<"default,4,5,gnu">;
def mno_constant_cfstrings : Flag<["-"], "mno-constant-cfstrings">, Group<m_Group>;
def mno_global_merge : Flag<["-"], "mno-global-merge">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Disable merging of globals">;
def mno_pascal_strings : Flag<["-"], "mno-pascal-strings">,
Alias<fno_pascal_strings>;
def mno_red_zone : Flag<["-"], "mno-red-zone">, Group<m_Group>;
def mno_relax_all : Flag<["-"], "mno-relax-all">, Group<m_Group>;
def mno_rtd: Flag<["-"], "mno-rtd">, Group<m_Group>;
def mno_soft_float : Flag<["-"], "mno-soft-float">, Group<m_Group>;
def mno_stackrealign : Flag<["-"], "mno-stackrealign">, Group<m_Group>;
def mrelax : Flag<["-"], "mrelax">, Group<m_riscv_Features_Group>,
HelpText<"Enable linker relaxation">;
def mno_relax : Flag<["-"], "mno-relax">, Group<m_riscv_Features_Group>,
HelpText<"Disable linker relaxation">;
def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_arm_Features_Group>,
HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64 only)">;
def mno_unaligned_access : Flag<["-"], "mno-unaligned-access">, Group<m_arm_Features_Group>,
HelpText<"Force all memory accesses to be aligned (AArch32/AArch64 only)">;
def mstrict_align : Flag<["-"], "mstrict-align">, Alias<mno_unaligned_access>, Flags<[CC1Option,HelpHidden]>,
HelpText<"Force all memory accesses to be aligned (same as mno-unaligned-access)">;
def mno_thumb : Flag<["-"], "mno-thumb">, Group<m_arm_Features_Group>;
def mrestrict_it: Flag<["-"], "mrestrict-it">, Group<m_arm_Features_Group>,
HelpText<"Disallow generation of deprecated IT blocks for ARMv8. It is on by default for ARMv8 Thumb mode.">;
def mno_restrict_it: Flag<["-"], "mno-restrict-it">, Group<m_arm_Features_Group>,
HelpText<"Allow generation of deprecated IT blocks for ARMv8. It is off by default for ARMv8 Thumb mode">;
def marm : Flag<["-"], "marm">, Alias<mno_thumb>;
def ffixed_r9 : Flag<["-"], "ffixed-r9">, Group<m_arm_Features_Group>,
HelpText<"Reserve the r9 register (ARM only)">;
def mno_movt : Flag<["-"], "mno-movt">, Group<m_arm_Features_Group>,
HelpText<"Disallow use of movt/movw pairs (ARM only)">;
def mcrc : Flag<["-"], "mcrc">, Group<m_Group>,
HelpText<"Allow use of CRC instructions (ARM/Mips only)">;
def mnocrc : Flag<["-"], "mnocrc">, Group<m_arm_Features_Group>,
HelpText<"Disallow use of CRC instructions (ARM only)">;
def mno_neg_immediates: Flag<["-"], "mno-neg-immediates">, Group<m_arm_Features_Group>,
HelpText<"Disallow converting instructions with negative immediates to their negation or inversion.">;
def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_aarch64_Features_Group>,
HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">;
def mfix_cortex_a53_835769 : Flag<["-"], "mfix-cortex-a53-835769">,
Group<m_aarch64_Features_Group>,
HelpText<"Workaround Cortex-A53 erratum 835769 (AArch64 only)">;
def mno_fix_cortex_a53_835769 : Flag<["-"], "mno-fix-cortex-a53-835769">,
Group<m_aarch64_Features_Group>,
HelpText<"Don't workaround Cortex-A53 erratum 835769 (AArch64 only)">;
def ffixed_x18 : Flag<["-"], "ffixed-x18">, Group<m_aarch64_Features_Group>,
HelpText<"Reserve the x18 register (AArch64 only)">;
def ffixed_x20 : Flag<["-"], "ffixed-x20">, Group<m_aarch64_Features_Group>,
HelpText<"Reserve the x20 register (AArch64 only)">;
def msimd128 : Flag<["-"], "msimd128">, Group<m_wasm_Features_Group>;
def mno_simd128 : Flag<["-"], "mno-simd128">, Group<m_wasm_Features_Group>;
def mnontrapping_fptoint : Flag<["-"], "mnontrapping-fptoint">, Group<m_wasm_Features_Group>;
def mno_nontrapping_fptoint : Flag<["-"], "mno-nontrapping-fptoint">, Group<m_wasm_Features_Group>;
def msign_ext : Flag<["-"], "msign-ext">, Group<m_wasm_Features_Group>;
def mno_sign_ext : Flag<["-"], "mno-sign-ext">, Group<m_wasm_Features_Group>;
def mexception_handling : Flag<["-"], "mexception-handling">, Group<m_wasm_Features_Group>;
def mno_exception_handling : Flag<["-"], "mno-exception-handling">, Group<m_wasm_Features_Group>;
def mamdgpu_debugger_abi : Joined<["-"], "mamdgpu-debugger-abi=">,
Flags<[HelpHidden]>,
Group<m_Group>,
HelpText<"Generate additional code for specified <version> of debugger ABI (AMDGPU only)">,
MetaVarName<"<version>">;
def mxnack : Flag<["-"], "mxnack">, Group<m_amdgpu_Features_Group>,
HelpText<"Enable XNACK (AMDGPU only)">;
def mno_xnack : Flag<["-"], "mno-xnack">, Group<m_amdgpu_Features_Group>,
HelpText<"Disable XNACK (AMDGPU only)">;
def faltivec : Flag<["-"], "faltivec">, Group<f_Group>, Flags<[DriverOption]>;
def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>, Flags<[DriverOption]>;
def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>;
def mno_altivec : Flag<["-"], "mno-altivec">, Group<m_ppc_Features_Group>;
def mvsx : Flag<["-"], "mvsx">, Group<m_ppc_Features_Group>;
def mno_vsx : Flag<["-"], "mno-vsx">, Group<m_ppc_Features_Group>;
def msecure_plt : Flag<["-"], "msecure-plt">, Group<m_ppc_Features_Group>;
def mpower8_vector : Flag<["-"], "mpower8-vector">,
Group<m_ppc_Features_Group>;
def mno_power8_vector : Flag<["-"], "mno-power8-vector">,
Group<m_ppc_Features_Group>;
def mpower9_vector : Flag<["-"], "mpower9-vector">,
Group<m_ppc_Features_Group>;
def mno_power9_vector : Flag<["-"], "mno-power9-vector">,
Group<m_ppc_Features_Group>;
def mpower8_crypto : Flag<["-"], "mcrypto">,
Group<m_ppc_Features_Group>;
def mnopower8_crypto : Flag<["-"], "mno-crypto">,
Group<m_ppc_Features_Group>;
def mdirect_move : Flag<["-"], "mdirect-move">,
Group<m_ppc_Features_Group>;
def mnodirect_move : Flag<["-"], "mno-direct-move">,
Group<m_ppc_Features_Group>;
def mhtm : Flag<["-"], "mhtm">, Group<m_ppc_Features_Group>;
def mno_htm : Flag<["-"], "mno-htm">, Group<m_ppc_Features_Group>;
def mfprnd : Flag<["-"], "mfprnd">, Group<m_ppc_Features_Group>;
def mno_fprnd : Flag<["-"], "mno-fprnd">, Group<m_ppc_Features_Group>;
def mcmpb : Flag<["-"], "mcmpb">, Group<m_ppc_Features_Group>;
def mno_cmpb : Flag<["-"], "mno-cmpb">, Group<m_ppc_Features_Group>;
def misel : Flag<["-"], "misel">, Group<m_ppc_Features_Group>;
def mno_isel : Flag<["-"], "mno-isel">, Group<m_ppc_Features_Group>;
def mmfocrf : Flag<["-"], "mmfocrf">, Group<m_ppc_Features_Group>;
def mmfcrf : Flag<["-"], "mmfcrf">, Alias<mmfocrf>;
def mno_mfocrf : Flag<["-"], "mno-mfocrf">, Group<m_ppc_Features_Group>;
def mno_mfcrf : Flag<["-"], "mno-mfcrf">, Alias<mno_mfocrf>;
def mpopcntd : Flag<["-"], "mpopcntd">, Group<m_ppc_Features_Group>;
def mno_popcntd : Flag<["-"], "mno-popcntd">, Group<m_ppc_Features_Group>;
def mqpx : Flag<["-"], "mqpx">, Group<m_ppc_Features_Group>;
def mno_qpx : Flag<["-"], "mno-qpx">, Group<m_ppc_Features_Group>;
def mcrbits : Flag<["-"], "mcrbits">, Group<m_ppc_Features_Group>;
def mno_crbits : Flag<["-"], "mno-crbits">, Group<m_ppc_Features_Group>;
def minvariant_function_descriptors :
Flag<["-"], "minvariant-function-descriptors">, Group<m_ppc_Features_Group>;
def mno_invariant_function_descriptors :
Flag<["-"], "mno-invariant-function-descriptors">,
Group<m_ppc_Features_Group>;
def mfloat128: Flag<["-"], "mfloat128">,
Group<m_ppc_Features_Group>;
def mno_float128 : Flag<["-"], "mno-float128">,
Group<m_ppc_Features_Group>;
def mlongcall: Flag<["-"], "mlongcall">,
Group<m_ppc_Features_Group>;
def mno_longcall : Flag<["-"], "mno-longcall">,
Group<m_ppc_Features_Group>;
def mvx : Flag<["-"], "mvx">, Group<m_Group>;
def mno_vx : Flag<["-"], "mno-vx">, Group<m_Group>;
def fzvector : Flag<["-"], "fzvector">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable System z vector language extension">;
def fno_zvector : Flag<["-"], "fno-zvector">, Group<f_Group>,
Flags<[CC1Option]>;
def mzvector : Flag<["-"], "mzvector">, Alias<fzvector>;
def mno_zvector : Flag<["-"], "mno-zvector">, Alias<fno_zvector>;
def mbackchain : Flag<["-"], "mbackchain">, Group<m_Group>, Flags<[DriverOption,CC1Option]>,
HelpText<"Link stack frames through backchain on System Z">;
def mno_backchain : Flag<["-"], "mno-backchain">, Group<m_Group>, Flags<[DriverOption,CC1Option]>;
def mno_warn_nonportable_cfstrings : Flag<["-"], "mno-warn-nonportable-cfstrings">, Group<m_Group>;
def mno_omit_leaf_frame_pointer : Flag<["-"], "mno-omit-leaf-frame-pointer">, Group<m_Group>;
def momit_leaf_frame_pointer : Flag<["-"], "momit-leaf-frame-pointer">, Group<m_Group>,
HelpText<"Omit frame pointer setup for leaf functions">, Flags<[CC1Option]>;
def moslib_EQ : Joined<["-"], "moslib=">, Group<m_Group>;
def mpascal_strings : Flag<["-"], "mpascal-strings">, Alias<fpascal_strings>;
def mred_zone : Flag<["-"], "mred-zone">, Group<m_Group>;
def mregparm_EQ : Joined<["-"], "mregparm=">, Group<m_Group>;
def mrelax_all : Flag<["-"], "mrelax-all">, Group<m_Group>, Flags<[CC1Option,CC1AsOption]>,
HelpText<"(integrated-as) Relax all machine instructions">;
def mincremental_linker_compatible : Flag<["-"], "mincremental-linker-compatible">, Group<m_Group>,
Flags<[CC1Option,CC1AsOption]>,
HelpText<"(integrated-as) Emit an object file which can be used with an incremental linker">;
def mno_incremental_linker_compatible : Flag<["-"], "mno-incremental-linker-compatible">, Group<m_Group>,
HelpText<"(integrated-as) Emit an object file which cannot be used with an incremental linker">;
def mrtd : Flag<["-"], "mrtd">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Make StdCall calling convention the default">;
def msmall_data_threshold_EQ : Joined <["-"], "msmall-data-threshold=">,
Group<m_Group>, Alias<G>;
def msoft_float : Flag<["-"], "msoft-float">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Use software floating point">;
def mno_implicit_float : Flag<["-"], "mno-implicit-float">, Group<m_Group>,
HelpText<"Don't generate implicit floating point instructions">;
def mimplicit_float : Flag<["-"], "mimplicit-float">, Group<m_Group>;
def mrecip : Flag<["-"], "mrecip">, Group<m_Group>;
def mrecip_EQ : CommaJoined<["-"], "mrecip=">, Group<m_Group>, Flags<[CC1Option]>;
def mprefer_vector_width_EQ : Joined<["-"], "mprefer-vector-width=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Specifies preferred vector width for auto-vectorization. Defaults to 'none' which allows target specific decisions.">;
def mpie_copy_relocations : Flag<["-"], "mpie-copy-relocations">, Group<m_Group>,
Flags<[CC1Option]>,
HelpText<"Use copy relocations support for PIE builds">;
def mno_pie_copy_relocations : Flag<["-"], "mno-pie-copy-relocations">, Group<m_Group>;
def mfentry : Flag<["-"], "mfentry">, HelpText<"Insert calls to fentry at function entry (x86 only)">,
Flags<[CC1Option]>, Group<m_Group>;
def mips16 : Flag<["-"], "mips16">, Group<m_mips_Features_Group>;
def mno_mips16 : Flag<["-"], "mno-mips16">, Group<m_mips_Features_Group>;
def mmicromips : Flag<["-"], "mmicromips">, Group<m_mips_Features_Group>;
def mno_micromips : Flag<["-"], "mno-micromips">, Group<m_mips_Features_Group>;
def mxgot : Flag<["-"], "mxgot">, Group<m_mips_Features_Group>;
def mno_xgot : Flag<["-"], "mno-xgot">, Group<m_mips_Features_Group>;
def mldc1_sdc1 : Flag<["-"], "mldc1-sdc1">, Group<m_mips_Features_Group>;
def mno_ldc1_sdc1 : Flag<["-"], "mno-ldc1-sdc1">, Group<m_mips_Features_Group>;
def mcheck_zero_division : Flag<["-"], "mcheck-zero-division">,
Group<m_mips_Features_Group>;
def mno_check_zero_division : Flag<["-"], "mno-check-zero-division">,
Group<m_mips_Features_Group>;
def mcompact_branches_EQ : Joined<["-"], "mcompact-branches=">,
Group<m_mips_Features_Group>;
def mbranch_likely : Flag<["-"], "mbranch-likely">, Group<m_Group>,
IgnoredGCCCompat;
def mno_branch_likely : Flag<["-"], "mno-branch-likely">, Group<m_Group>,
IgnoredGCCCompat;
def mindirect_jump_EQ : Joined<["-"], "mindirect-jump=">,
Group<m_mips_Features_Group>,
HelpText<"Change indirect jump instructions to inhibit speculation">;
def mdsp : Flag<["-"], "mdsp">, Group<m_mips_Features_Group>;
def mno_dsp : Flag<["-"], "mno-dsp">, Group<m_mips_Features_Group>;
def mdspr2 : Flag<["-"], "mdspr2">, Group<m_mips_Features_Group>;
def mno_dspr2 : Flag<["-"], "mno-dspr2">, Group<m_mips_Features_Group>;
def msingle_float : Flag<["-"], "msingle-float">, Group<m_mips_Features_Group>;
def mdouble_float : Flag<["-"], "mdouble-float">, Group<m_mips_Features_Group>;
def mmadd4 : Flag<["-"], "mmadd4">, Group<m_mips_Features_Group>,
HelpText<"Enable the generation of 4-operand madd.s, madd.d and related instructions.">;
def mno_madd4 : Flag<["-"], "mno-madd4">, Group<m_mips_Features_Group>,
HelpText<"Disable the generation of 4-operand madd.s, madd.d and related instructions.">;
def mmsa : Flag<["-"], "mmsa">, Group<m_mips_Features_Group>,
HelpText<"Enable MSA ASE (MIPS only)">;
def mno_msa : Flag<["-"], "mno-msa">, Group<m_mips_Features_Group>,
HelpText<"Disable MSA ASE (MIPS only)">;
def mmt : Flag<["-"], "mmt">, Group<m_mips_Features_Group>,
HelpText<"Enable MT ASE (MIPS only)">;
def mno_mt : Flag<["-"], "mno-mt">, Group<m_mips_Features_Group>,
HelpText<"Disable MT ASE (MIPS only)">;
def mfp64 : Flag<["-"], "mfp64">, Group<m_mips_Features_Group>,
HelpText<"Use 64-bit floating point registers (MIPS only)">;
def mfp32 : Flag<["-"], "mfp32">, Group<m_mips_Features_Group>,
HelpText<"Use 32-bit floating point registers (MIPS only)">;
def mgpopt : Flag<["-"], "mgpopt">, Group<m_mips_Features_Group>,
HelpText<"Use GP relative accesses for symbols known to be in a small"
" data section (MIPS)">;
def mno_gpopt : Flag<["-"], "mno-gpopt">, Group<m_mips_Features_Group>,
HelpText<"Do not use GP relative accesses for symbols known to be in a small"
" data section (MIPS)">;
def mlocal_sdata : Flag<["-"], "mlocal-sdata">,
Group<m_mips_Features_Group>,
HelpText<"Extend the -G behaviour to object local data (MIPS)">;
def mno_local_sdata : Flag<["-"], "mno-local-sdata">,
Group<m_mips_Features_Group>,
HelpText<"Do not extend the -G behaviour to object local data (MIPS)">;
def mextern_sdata : Flag<["-"], "mextern-sdata">,
Group<m_mips_Features_Group>,
HelpText<"Assume that externally defined data is in the small data if it"
" meets the -G <size> threshold (MIPS)">;
def mno_extern_sdata : Flag<["-"], "mno-extern-sdata">,
Group<m_mips_Features_Group>,
HelpText<"Do not assume that externally defined data is in the small data if"
" it meets the -G <size> threshold (MIPS)">;
def membedded_data : Flag<["-"], "membedded-data">,
Group<m_mips_Features_Group>,
HelpText<"Place constants in the .rodata section instead of the .sdata "
"section even if they meet the -G <size> threshold (MIPS)">;
def mno_embedded_data : Flag<["-"], "mno-embedded-data">,
Group<m_mips_Features_Group>,
HelpText<"Do not place constants in the .rodata section instead of the "
".sdata if they meet the -G <size> threshold (MIPS)">;
def mnan_EQ : Joined<["-"], "mnan=">, Group<m_mips_Features_Group>;
def mabs_EQ : Joined<["-"], "mabs=">, Group<m_mips_Features_Group>;
def mabicalls : Flag<["-"], "mabicalls">, Group<m_mips_Features_Group>,
HelpText<"Enable SVR4-style position-independent code (MIPS only)">;
def mno_abicalls : Flag<["-"], "mno-abicalls">, Group<m_mips_Features_Group>,
HelpText<"Disable SVR4-style position-independent code (MIPS only)">;
def mno_crc : Flag<["-"], "mno-crc">, Group<m_mips_Features_Group>,
HelpText<"Disallow use of CRC instructions (MIPS only)">;
def mvirt : Flag<["-"], "mvirt">, Group<m_mips_Features_Group>;
def mno_virt : Flag<["-"], "mno-virt">, Group<m_mips_Features_Group>;
def mginv : Flag<["-"], "mginv">, Group<m_mips_Features_Group>;
def mno_ginv : Flag<["-"], "mno-ginv">, Group<m_mips_Features_Group>;
def mips1 : Flag<["-"], "mips1">,
Alias<march_EQ>, AliasArgs<["mips1"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips1">, Flags<[HelpHidden]>;
def mips2 : Flag<["-"], "mips2">,
Alias<march_EQ>, AliasArgs<["mips2"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips2">, Flags<[HelpHidden]>;
def mips3 : Flag<["-"], "mips3">,
Alias<march_EQ>, AliasArgs<["mips3"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips3">, Flags<[HelpHidden]>;
def mips4 : Flag<["-"], "mips4">,
Alias<march_EQ>, AliasArgs<["mips4"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips4">, Flags<[HelpHidden]>;
def mips5 : Flag<["-"], "mips5">,
Alias<march_EQ>, AliasArgs<["mips5"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips5">, Flags<[HelpHidden]>;
def mips32 : Flag<["-"], "mips32">,
Alias<march_EQ>, AliasArgs<["mips32"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips32">, Flags<[HelpHidden]>;
def mips32r2 : Flag<["-"], "mips32r2">,
Alias<march_EQ>, AliasArgs<["mips32r2"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips32r2">, Flags<[HelpHidden]>;
def mips32r3 : Flag<["-"], "mips32r3">,
Alias<march_EQ>, AliasArgs<["mips32r3"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips32r3">, Flags<[HelpHidden]>;
def mips32r5 : Flag<["-"], "mips32r5">,
Alias<march_EQ>, AliasArgs<["mips32r5"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips32r5">, Flags<[HelpHidden]>;
def mips32r6 : Flag<["-"], "mips32r6">,
Alias<march_EQ>, AliasArgs<["mips32r6"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips32r6">, Flags<[HelpHidden]>;
def mips64 : Flag<["-"], "mips64">,
Alias<march_EQ>, AliasArgs<["mips64"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips64">, Flags<[HelpHidden]>;
def mips64r2 : Flag<["-"], "mips64r2">,
Alias<march_EQ>, AliasArgs<["mips64r2"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips64r2">, Flags<[HelpHidden]>;
def mips64r3 : Flag<["-"], "mips64r3">,
Alias<march_EQ>, AliasArgs<["mips64r3"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips64r3">, Flags<[HelpHidden]>;
def mips64r5 : Flag<["-"], "mips64r5">,
Alias<march_EQ>, AliasArgs<["mips64r5"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips64r5">, Flags<[HelpHidden]>;
def mips64r6 : Flag<["-"], "mips64r6">,
Alias<march_EQ>, AliasArgs<["mips64r6"]>, Group<m_mips_Features_Group>,
HelpText<"Equivalent to -march=mips64r6">, Flags<[HelpHidden]>;
def mfpxx : Flag<["-"], "mfpxx">, Group<m_mips_Features_Group>,
HelpText<"Avoid FPU mode dependent operations when used with the O32 ABI">,
Flags<[HelpHidden]>;
def modd_spreg : Flag<["-"], "modd-spreg">, Group<m_mips_Features_Group>,
HelpText<"Enable odd single-precision floating point registers">,
Flags<[HelpHidden]>;
def mno_odd_spreg : Flag<["-"], "mno-odd-spreg">, Group<m_mips_Features_Group>,
HelpText<"Disable odd single-precision floating point registers">,
Flags<[HelpHidden]>;
def mglibc : Flag<["-"], "mglibc">, Group<m_libc_Group>, Flags<[HelpHidden]>;
def muclibc : Flag<["-"], "muclibc">, Group<m_libc_Group>, Flags<[HelpHidden]>;
def module_file_info : Flag<["-"], "module-file-info">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Provide information about a particular module file">;
def mthumb : Flag<["-"], "mthumb">, Group<m_Group>;
def mtune_EQ : Joined<["-"], "mtune=">, Group<m_Group>;
def multi__module : Flag<["-"], "multi_module">;
def multiply__defined__unused : Separate<["-"], "multiply_defined_unused">;
def multiply__defined : Separate<["-"], "multiply_defined">;
def mwarn_nonportable_cfstrings : Flag<["-"], "mwarn-nonportable-cfstrings">, Group<m_Group>;
def no_canonical_prefixes : Flag<["-"], "no-canonical-prefixes">, Flags<[HelpHidden, CoreOption]>,
HelpText<"Use relative instead of canonical paths">;
def no_cpp_precomp : Flag<["-"], "no-cpp-precomp">, Group<clang_ignored_f_Group>;
def no_integrated_cpp : Flag<["-", "--"], "no-integrated-cpp">, Flags<[DriverOption]>;
def no_pedantic : Flag<["-", "--"], "no-pedantic">, Group<pedantic_Group>;
def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_terms">;
def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable builtin #include directories">;
def nocudainc : Flag<["-"], "nocudainc">;
def nocudalib : Flag<["-"], "nocudalib">;
def nodefaultlibs : Flag<["-"], "nodefaultlibs">;
def nofixprebinding : Flag<["-"], "nofixprebinding">;
def nolibc : Flag<["-"], "nolibc">;
def nomultidefs : Flag<["-"], "nomultidefs">;
def nopie : Flag<["-"], "nopie">;
def no_pie : Flag<["-"], "no-pie">, Alias<nopie>;
def noprebind : Flag<["-"], "noprebind">;
def noseglinkedit : Flag<["-"], "noseglinkedit">;
def nostartfiles : Flag<["-"], "nostartfiles">;
def nostdinc : Flag<["-"], "nostdinc">, Flags<[CoreOption]>;
def nostdlibinc : Flag<["-"], "nostdlibinc">;
def nostdincxx : Flag<["-"], "nostdinc++">, Flags<[CC1Option]>,
HelpText<"Disable standard #include directories for the C++ standard library">;
def nostdlib : Flag<["-"], "nostdlib">;
def nostdlibxx : Flag<["-"], "nostdlib++">;
def object : Flag<["-"], "object">;
def o : JoinedOrSeparate<["-"], "o">, Flags<[DriverOption, RenderAsInput, CC1Option, CC1AsOption]>,
HelpText<"Write output to <file>">, MetaVarName<"<file>">;
def pagezero__size : JoinedOrSeparate<["-"], "pagezero_size">;
def pass_exit_codes : Flag<["-", "--"], "pass-exit-codes">, Flags<[Unsupported]>;
def pedantic_errors : Flag<["-", "--"], "pedantic-errors">, Group<pedantic_Group>, Flags<[CC1Option]>;
def pedantic : Flag<["-", "--"], "pedantic">, Group<pedantic_Group>, Flags<[CC1Option]>;
def pg : Flag<["-"], "pg">, HelpText<"Enable mcount instrumentation">, Flags<[CC1Option]>;
def pipe : Flag<["-", "--"], "pipe">,
HelpText<"Use pipes between commands, when possible">;
def prebind__all__twolevel__modules : Flag<["-"], "prebind_all_twolevel_modules">;
def prebind : Flag<["-"], "prebind">;
def preload : Flag<["-"], "preload">;
def print_file_name_EQ : Joined<["-", "--"], "print-file-name=">,
HelpText<"Print the full library path of <file>">, MetaVarName<"<file>">;
def print_ivar_layout : Flag<["-"], "print-ivar-layout">, Flags<[CC1Option]>,
HelpText<"Enable Objective-C Ivar layout bitmap print trace">;
def print_libgcc_file_name : Flag<["-", "--"], "print-libgcc-file-name">,
HelpText<"Print the library path for the currently used compiler runtime "
"library (\"libgcc.a\" or \"libclang_rt.builtins.*.a\")">;
def print_multi_directory : Flag<["-", "--"], "print-multi-directory">;
def print_multi_lib : Flag<["-", "--"], "print-multi-lib">;
def print_multi_os_directory : Flag<["-", "--"], "print-multi-os-directory">,
Flags<[Unsupported]>;
def print_prog_name_EQ : Joined<["-", "--"], "print-prog-name=">,
HelpText<"Print the full program path of <name>">, MetaVarName<"<name>">;
def print_resource_dir : Flag<["-", "--"], "print-resource-dir">,
HelpText<"Print the resource directory pathname">;
def print_search_dirs : Flag<["-", "--"], "print-search-dirs">,
HelpText<"Print the paths used for finding libraries and programs">;
def private__bundle : Flag<["-"], "private_bundle">;
def pthreads : Flag<["-"], "pthreads">;
def pthread : Flag<["-"], "pthread">, Flags<[CC1Option]>,
HelpText<"Support POSIX threads in generated code">;
def no_pthread : Flag<["-"], "no-pthread">, Flags<[CC1Option]>;
def p : Flag<["-"], "p">;
def pie : Flag<["-"], "pie">;
def read__only__relocs : Separate<["-"], "read_only_relocs">;
def remap : Flag<["-"], "remap">;
def rewrite_objc : Flag<["-"], "rewrite-objc">, Flags<[DriverOption,CC1Option]>,
HelpText<"Rewrite Objective-C source to C++">, Group<Action_Group>;
def rewrite_legacy_objc : Flag<["-"], "rewrite-legacy-objc">, Flags<[DriverOption]>,
HelpText<"Rewrite Legacy Objective-C source to C++">;
def rdynamic : Flag<["-"], "rdynamic">;
def resource_dir : Separate<["-"], "resource-dir">,
Flags<[DriverOption, CC1Option, CoreOption, HelpHidden]>,
HelpText<"The directory which holds the compiler resource files">;
def resource_dir_EQ : Joined<["-"], "resource-dir=">, Flags<[DriverOption, CoreOption]>,
Alias<resource_dir>;
def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>, Group<Link_Group>;
def rtlib_EQ : Joined<["-", "--"], "rtlib=">,
HelpText<"Compiler runtime library to use">;
def frtlib_add_rpath: Flag<["-"], "frtlib-add-rpath">, Flags<[NoArgumentUnused]>,
HelpText<"Add -rpath with architecture-specific resource directory to the linker flags">;
def fno_rtlib_add_rpath: Flag<["-"], "fno-rtlib-add-rpath">, Flags<[NoArgumentUnused]>,
HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags">;
def r : Flag<["-"], "r">, Flags<[LinkerInput,NoArgumentUnused]>,
Group<Link_Group>;
def save_temps_EQ : Joined<["-", "--"], "save-temps=">, Flags<[CC1Option, DriverOption]>,
HelpText<"Save intermediate compilation results.">;
def save_temps : Flag<["-", "--"], "save-temps">, Flags<[DriverOption]>,
Alias<save_temps_EQ>, AliasArgs<["cwd"]>,
HelpText<"Save intermediate compilation results">;
def save_stats_EQ : Joined<["-", "--"], "save-stats=">, Flags<[DriverOption]>,
HelpText<"Save llvm statistics.">;
def save_stats : Flag<["-", "--"], "save-stats">, Flags<[DriverOption]>,
Alias<save_stats_EQ>, AliasArgs<["cwd"]>,
HelpText<"Save llvm statistics.">;
def via_file_asm : Flag<["-", "--"], "via-file-asm">, InternalDebugOpt,
HelpText<"Write assembly to file for input to assemble jobs">;
def sectalign : MultiArg<["-"], "sectalign", 3>;
def sectcreate : MultiArg<["-"], "sectcreate", 3>;
def sectobjectsymbols : MultiArg<["-"], "sectobjectsymbols", 2>;
def sectorder : MultiArg<["-"], "sectorder", 3>;
def seg1addr : JoinedOrSeparate<["-"], "seg1addr">;
def seg__addr__table__filename : Separate<["-"], "seg_addr_table_filename">;
def seg__addr__table : Separate<["-"], "seg_addr_table">;
def segaddr : MultiArg<["-"], "segaddr", 2>;
def segcreate : MultiArg<["-"], "segcreate", 3>;
def seglinkedit : Flag<["-"], "seglinkedit">;
def segprot : MultiArg<["-"], "segprot", 3>;
def segs__read__only__addr : Separate<["-"], "segs_read_only_addr">;
def segs__read__write__addr : Separate<["-"], "segs_read_write_addr">;
def segs__read__ : Joined<["-"], "segs_read_">;
def shared_libgcc : Flag<["-"], "shared-libgcc">;
def shared : Flag<["-", "--"], "shared">;
def single__module : Flag<["-"], "single_module">;
def specs_EQ : Joined<["-", "--"], "specs=">;
def specs : Separate<["-", "--"], "specs">, Flags<[Unsupported]>;
def static_libgcc : Flag<["-"], "static-libgcc">;
def static_libstdcxx : Flag<["-"], "static-libstdc++">;
def static : Flag<["-", "--"], "static">, Flags<[NoArgumentUnused]>;
def std_default_EQ : Joined<["-"], "std-default=">;
def std_EQ : Joined<["-", "--"], "std=">, Flags<[CC1Option]>,
Group<CompileOnly_Group>, HelpText<"Language standard to compile for">,
ValuesCode<[{
const char *Values =
#define LANGSTANDARD(id, name, lang, desc, features) name ","
#define LANGSTANDARD_ALIAS(id, alias) alias ","
#include "clang/Frontend/LangStandards.def"
;
}]>;
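// Sketch of the expansion above: every LANGSTANDARD(id, name, lang, desc,
// features) entry in LangStandards.def contributes its quoted name plus a
// trailing comma (e.g. a standard named "gnu11" yields the literal "gnu11,"),
// and C string-literal concatenation joins the pieces into one
// comma-separated Values list.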
def stdlib_EQ : Joined<["-", "--"], "stdlib=">, Flags<[CC1Option]>,
HelpText<"C++ standard library to use">, Values<"libc++,libstdc++,platform">;
def sub__library : JoinedOrSeparate<["-"], "sub_library">;
def sub__umbrella : JoinedOrSeparate<["-"], "sub_umbrella">;
def system_header_prefix : Joined<["--"], "system-header-prefix=">,
Group<clang_i_Group>, Flags<[CC1Option]>, MetaVarName<"<prefix>">,
HelpText<"Treat all #include paths starting with <prefix> as including a "
"system header.">;
def : Separate<["--"], "system-header-prefix">, Alias<system_header_prefix>;
def no_system_header_prefix : Joined<["--"], "no-system-header-prefix=">,
Group<clang_i_Group>, Flags<[CC1Option]>, MetaVarName<"<prefix>">,
HelpText<"Treat all #include paths starting with <prefix> as not including a "
"system header.">;
def : Separate<["--"], "no-system-header-prefix">, Alias<no_system_header_prefix>;
def s : Flag<["-"], "s">, Group<Link_Group>;
def target : Joined<["--"], "target=">, Flags<[DriverOption, CoreOption]>,
HelpText<"Generate code for the given target">;
def gcc_toolchain : Joined<["--"], "gcc-toolchain=">, Flags<[DriverOption]>,
HelpText<"Use the gcc toolchain at the given directory">;
def time : Flag<["-"], "time">,
HelpText<"Time individual commands">;
def traditional_cpp : Flag<["-", "--"], "traditional-cpp">, Flags<[CC1Option]>,
HelpText<"Enable some traditional CPP emulation">;
def traditional : Flag<["-", "--"], "traditional">;
def trigraphs : Flag<["-", "--"], "trigraphs">, Alias<ftrigraphs>,
HelpText<"Process trigraph sequences">;
def twolevel__namespace__hints : Flag<["-"], "twolevel_namespace_hints">;
def twolevel__namespace : Flag<["-"], "twolevel_namespace">;
def t : Flag<["-"], "t">, Group<Link_Group>;
def umbrella : Separate<["-"], "umbrella">;
def undefined : JoinedOrSeparate<["-"], "undefined">, Group<u_Group>;
def undef : Flag<["-"], "undef">, Group<u_Group>, Flags<[CC1Option]>,
HelpText<"undef all system defines">;
def unexported__symbols__list : Separate<["-"], "unexported_symbols_list">;
def u : JoinedOrSeparate<["-"], "u">, Group<u_Group>;
def v : Flag<["-"], "v">, Flags<[CC1Option, CoreOption]>,
HelpText<"Show commands to run and use verbose output">;
def verify_debug_info : Flag<["--"], "verify-debug-info">, Flags<[DriverOption]>,
HelpText<"Verify the binary representation of debug output">;
def weak_l : Joined<["-"], "weak-l">, Flags<[LinkerInput]>;
def weak__framework : Separate<["-"], "weak_framework">, Flags<[LinkerInput]>;
def weak__library : Separate<["-"], "weak_library">, Flags<[LinkerInput]>;
def weak__reference__mismatches : Separate<["-"], "weak_reference_mismatches">;
def whatsloaded : Flag<["-"], "whatsloaded">;
def whyload : Flag<["-"], "whyload">;
def w : Flag<["-"], "w">, HelpText<"Suppress all warnings">, Flags<[CC1Option]>;
def x : JoinedOrSeparate<["-"], "x">, Flags<[DriverOption,CC1Option]>,
HelpText<"Treat subsequent input files as having type <language>">,
MetaVarName<"<language>">;
def y : Joined<["-"], "y">;
def fintegrated_as : Flag<["-"], "fintegrated-as">, Flags<[DriverOption]>,
Group<f_Group>, HelpText<"Enable the integrated assembler">;
def fno_integrated_as : Flag<["-"], "fno-integrated-as">,
Flags<[CC1Option, DriverOption]>, Group<f_Group>,
HelpText<"Disable the integrated assembler">;
def : Flag<["-"], "integrated-as">, Alias<fintegrated_as>, Flags<[DriverOption]>;
def : Flag<["-"], "no-integrated-as">, Alias<fno_integrated_as>,
Flags<[CC1Option, DriverOption]>;
def working_directory : JoinedOrSeparate<["-"], "working-directory">, Flags<[CC1Option]>,
HelpText<"Resolve file paths relative to the specified directory">;
def working_directory_EQ : Joined<["-"], "working-directory=">, Flags<[CC1Option]>,
Alias<working_directory>;
// Double dash options, which are usually an alias for one of the previous
// options.
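// For example, the pair below accepts both "--mhwdiv=arm" (Joined) and
// "--mhwdiv arm" (Separate); each is rewritten to the single-dash -mhwdiv=
// option via Alias.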
def _mhwdiv_EQ : Joined<["--"], "mhwdiv=">, Alias<mhwdiv_EQ>;
def _mhwdiv : Separate<["--"], "mhwdiv">, Alias<mhwdiv_EQ>;
def _CLASSPATH_EQ : Joined<["--"], "CLASSPATH=">, Alias<fclasspath_EQ>;
def _CLASSPATH : Separate<["--"], "CLASSPATH">, Alias<fclasspath_EQ>;
def _all_warnings : Flag<["--"], "all-warnings">, Alias<Wall>;
def _analyze_auto : Flag<["--"], "analyze-auto">, Flags<[DriverOption]>;
def _analyzer_no_default_checks : Flag<["--"], "analyzer-no-default-checks">, Flags<[DriverOption]>;
def _analyzer_output : JoinedOrSeparate<["--"], "analyzer-output">, Flags<[DriverOption]>,
HelpText<"Static analyzer report output format (html|plist|plist-multi-file|plist-html|text).">;
def _analyze : Flag<["--"], "analyze">, Flags<[DriverOption, CoreOption]>,
HelpText<"Run the static analyzer">;
def _assemble : Flag<["--"], "assemble">, Alias<S>;
def _assert_EQ : Joined<["--"], "assert=">, Alias<A>;
def _assert : Separate<["--"], "assert">, Alias<A>;
def _bootclasspath_EQ : Joined<["--"], "bootclasspath=">, Alias<fbootclasspath_EQ>;
def _bootclasspath : Separate<["--"], "bootclasspath">, Alias<fbootclasspath_EQ>;
def _classpath_EQ : Joined<["--"], "classpath=">, Alias<fclasspath_EQ>;
def _classpath : Separate<["--"], "classpath">, Alias<fclasspath_EQ>;
def _comments_in_macros : Flag<["--"], "comments-in-macros">, Alias<CC>;
def _comments : Flag<["--"], "comments">, Alias<C>;
def _compile : Flag<["--"], "compile">, Alias<c>;
def _constant_cfstrings : Flag<["--"], "constant-cfstrings">;
def _debug_EQ : Joined<["--"], "debug=">, Alias<g_Flag>;
def _debug : Flag<["--"], "debug">, Alias<g_Flag>;
def _define_macro_EQ : Joined<["--"], "define-macro=">, Alias<D>;
def _define_macro : Separate<["--"], "define-macro">, Alias<D>;
def _dependencies : Flag<["--"], "dependencies">, Alias<M>;
def _dyld_prefix_EQ : Joined<["--"], "dyld-prefix=">;
def _dyld_prefix : Separate<["--"], "dyld-prefix">, Alias<_dyld_prefix_EQ>;
def _encoding_EQ : Joined<["--"], "encoding=">, Alias<fencoding_EQ>;
def _encoding : Separate<["--"], "encoding">, Alias<fencoding_EQ>;
def _entry : Flag<["--"], "entry">, Alias<e>;
def _extdirs_EQ : Joined<["--"], "extdirs=">, Alias<fextdirs_EQ>;
def _extdirs : Separate<["--"], "extdirs">, Alias<fextdirs_EQ>;
def _extra_warnings : Flag<["--"], "extra-warnings">, Alias<W_Joined>;
def _for_linker_EQ : Joined<["--"], "for-linker=">, Alias<Xlinker>;
def _for_linker : Separate<["--"], "for-linker">, Alias<Xlinker>;
def _force_link_EQ : Joined<["--"], "force-link=">, Alias<u>;
def _force_link : Separate<["--"], "force-link">, Alias<u>;
def _help_hidden : Flag<["--"], "help-hidden">,
HelpText<"Display help for hidden options">;
def _imacros_EQ : Joined<["--"], "imacros=">, Alias<imacros>;
def _include_barrier : Flag<["--"], "include-barrier">, Alias<I_>;
def _include_directory_after_EQ : Joined<["--"], "include-directory-after=">, Alias<idirafter>;
def _include_directory_after : Separate<["--"], "include-directory-after">, Alias<idirafter>;
def _include_directory_EQ : Joined<["--"], "include-directory=">, Alias<I>;
def _include_directory : Separate<["--"], "include-directory">, Alias<I>;
def _include_prefix_EQ : Joined<["--"], "include-prefix=">, Alias<iprefix>;
def _include_prefix : Separate<["--"], "include-prefix">, Alias<iprefix>;
def _include_with_prefix_after_EQ : Joined<["--"], "include-with-prefix-after=">, Alias<iwithprefix>;
def _include_with_prefix_after : Separate<["--"], "include-with-prefix-after">, Alias<iwithprefix>;
def _include_with_prefix_before_EQ : Joined<["--"], "include-with-prefix-before=">, Alias<iwithprefixbefore>;
def _include_with_prefix_before : Separate<["--"], "include-with-prefix-before">, Alias<iwithprefixbefore>;
def _include_with_prefix_EQ : Joined<["--"], "include-with-prefix=">, Alias<iwithprefix>;
def _include_with_prefix : Separate<["--"], "include-with-prefix">, Alias<iwithprefix>;
def _include_EQ : Joined<["--"], "include=">, Alias<include_>;
def _language_EQ : Joined<["--"], "language=">, Alias<x>;
def _language : Separate<["--"], "language">, Alias<x>;
def _library_directory_EQ : Joined<["--"], "library-directory=">, Alias<L>;
def _library_directory : Separate<["--"], "library-directory">, Alias<L>;
def _no_line_commands : Flag<["--"], "no-line-commands">, Alias<P>;
def _no_standard_includes : Flag<["--"], "no-standard-includes">, Alias<nostdinc>;
def _no_standard_libraries : Flag<["--"], "no-standard-libraries">, Alias<nostdlib>;
def _no_undefined : Flag<["--"], "no-undefined">, Flags<[LinkerInput]>;
def _no_warnings : Flag<["--"], "no-warnings">, Alias<w>;
def _optimize_EQ : Joined<["--"], "optimize=">, Alias<O>;
def _optimize : Flag<["--"], "optimize">, Alias<O>;
def _output_class_directory_EQ : Joined<["--"], "output-class-directory=">, Alias<foutput_class_dir_EQ>;
def _output_class_directory : Separate<["--"], "output-class-directory">, Alias<foutput_class_dir_EQ>;
def _output_EQ : Joined<["--"], "output=">, Alias<o>;
def _output : Separate<["--"], "output">, Alias<o>;
def _param : Separate<["--"], "param">, Group<CompileOnly_Group>;
def _param_EQ : Joined<["--"], "param=">, Alias<_param>;
def _precompile : Flag<["--"], "precompile">, Flags<[DriverOption]>,
Group<Action_Group>, HelpText<"Only precompile the input">;
def _prefix_EQ : Joined<["--"], "prefix=">, Alias<B>;
def _prefix : Separate<["--"], "prefix">, Alias<B>;
def _preprocess : Flag<["--"], "preprocess">, Alias<E>;
def _print_diagnostic_categories : Flag<["--"], "print-diagnostic-categories">;
def _print_file_name : Separate<["--"], "print-file-name">, Alias<print_file_name_EQ>;
def _print_missing_file_dependencies : Flag<["--"], "print-missing-file-dependencies">, Alias<MG>;
def _print_prog_name : Separate<["--"], "print-prog-name">, Alias<print_prog_name_EQ>;
def _profile_blocks : Flag<["--"], "profile-blocks">, Alias<a>;
def _profile : Flag<["--"], "profile">, Alias<p>;
def _resource_EQ : Joined<["--"], "resource=">, Alias<fcompile_resource_EQ>;
def _resource : Separate<["--"], "resource">, Alias<fcompile_resource_EQ>;
def _rtlib : Separate<["--"], "rtlib">, Alias<rtlib_EQ>;
def _serialize_diags : Separate<["-", "--"], "serialize-diagnostics">, Flags<[DriverOption]>,
HelpText<"Serialize compiler diagnostics to a file">;
// We give --version different semantics from -version.
def _version : Flag<["--"], "version">, Flags<[CoreOption, CC1Option]>,
HelpText<"Print version information">;
def _signed_char : Flag<["--"], "signed-char">, Alias<fsigned_char>;
def _std : Separate<["--"], "std">, Alias<std_EQ>;
def _stdlib : Separate<["--"], "stdlib">, Alias<stdlib_EQ>;
def _sysroot_EQ : Joined<["--"], "sysroot=">;
def _sysroot : Separate<["--"], "sysroot">, Alias<_sysroot_EQ>;
def _target_help : Flag<["--"], "target-help">;
def _trace_includes : Flag<["--"], "trace-includes">, Alias<H>;
def _undefine_macro_EQ : Joined<["--"], "undefine-macro=">, Alias<U>;
def _undefine_macro : Separate<["--"], "undefine-macro">, Alias<U>;
def _unsigned_char : Flag<["--"], "unsigned-char">, Alias<funsigned_char>;
def _user_dependencies : Flag<["--"], "user-dependencies">, Alias<MM>;
def _verbose : Flag<["--"], "verbose">, Alias<v>;
def _warn__EQ : Joined<["--"], "warn-=">, Alias<W_Joined>;
def _warn_ : Joined<["--"], "warn-">, Alias<W_Joined>;
def _write_dependencies : Flag<["--"], "write-dependencies">, Alias<MD>;
def _write_user_dependencies : Flag<["--"], "write-user-dependencies">, Alias<MMD>;
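// Catch-all: any otherwise-unmatched "--" option is consumed by the empty
// Joined spelling below and then diagnosed as unsupported.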
def _ : Joined<["--"], "">, Flags<[Unsupported]>;
// Hexagon feature flags.
def mieee_rnd_near : Flag<["-"], "mieee-rnd-near">,
Group<m_hexagon_Features_Group>;
def mv4 : Flag<["-"], "mv4">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv4"]>;
def mv5 : Flag<["-"], "mv5">, Group<m_hexagon_Features_Group>, Alias<mcpu_EQ>,
AliasArgs<["hexagonv5"]>;
def mv55 : Flag<["-"], "mv55">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv55"]>;
def mv60 : Flag<["-"], "mv60">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv60"]>;
def mv62 : Flag<["-"], "mv62">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv62"]>;
def mv65 : Flag<["-"], "mv65">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv65"]>;
def mhexagon_hvx : Flag<["-"], "mhvx">, Group<m_hexagon_Features_HVX_Group>,
HelpText<"Enable Hexagon Vector eXtensions">;
def mhexagon_hvx_EQ : Joined<["-"], "mhvx=">,
Group<m_hexagon_Features_HVX_Group>,
HelpText<"Enable Hexagon Vector eXtensions">;
def mno_hexagon_hvx : Flag<["-"], "mno-hvx">,
Group<m_hexagon_Features_HVX_Group>,
HelpText<"Disable Hexagon Vector eXtensions">;
def mhexagon_hvx_length_EQ : Joined<["-"], "mhvx-length=">,
Group<m_hexagon_Features_HVX_Group>, HelpText<"Set Hexagon Vector Length">,
Values<"64B,128B">;
def ffixed_r19: Flag<["-"], "ffixed-r19">,
HelpText<"Reserve register r19 (Hexagon only)">;
def mmemops : Flag<["-"], "mmemops">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Enable generation of memop instructions">;
def mno_memops : Flag<["-"], "mno-memops">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Disable generation of memop instructions">;
def mpackets : Flag<["-"], "mpackets">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Enable generation of instruction packets">;
def mno_packets : Flag<["-"], "mno-packets">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Disable generation of instruction packets">;
def mnvj : Flag<["-"], "mnvj">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Enable generation of new-value jumps">;
def mno_nvj : Flag<["-"], "mno-nvj">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Disable generation of new-value jumps">;
def mnvs : Flag<["-"], "mnvs">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Enable generation of new-value stores">;
def mno_nvs : Flag<["-"], "mno-nvs">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Disable generation of new-value stores">;
// X86 feature flags
def mx87 : Flag<["-"], "mx87">, Group<m_x86_Features_Group>;
def mno_x87 : Flag<["-"], "mno-x87">, Group<m_x86_Features_Group>;
def m80387 : Flag<["-"], "m80387">, Alias<mx87>;
def mno_80387 : Flag<["-"], "mno-80387">, Alias<mno_x87>;
def mmmx : Flag<["-"], "mmmx">, Group<m_x86_Features_Group>;
def mno_mmx : Flag<["-"], "mno-mmx">, Group<m_x86_Features_Group>;
def m3dnow : Flag<["-"], "m3dnow">, Group<m_x86_Features_Group>;
def mno_3dnow : Flag<["-"], "mno-3dnow">, Group<m_x86_Features_Group>;
def m3dnowa : Flag<["-"], "m3dnowa">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<["-"], "mno-3dnowa">, Group<m_x86_Features_Group>;
def msse : Flag<["-"], "msse">, Group<m_x86_Features_Group>;
def mno_sse : Flag<["-"], "mno-sse">, Group<m_x86_Features_Group>;
def msse2 : Flag<["-"], "msse2">, Group<m_x86_Features_Group>;
def mno_sse2 : Flag<["-"], "mno-sse2">, Group<m_x86_Features_Group>;
def msse3 : Flag<["-"], "msse3">, Group<m_x86_Features_Group>;
def mno_sse3 : Flag<["-"], "mno-sse3">, Group<m_x86_Features_Group>;
def mssse3 : Flag<["-"], "mssse3">, Group<m_x86_Features_Group>;
def mno_ssse3 : Flag<["-"], "mno-ssse3">, Group<m_x86_Features_Group>;
def msse4_1 : Flag<["-"], "msse4.1">, Group<m_x86_Features_Group>;
def mno_sse4_1 : Flag<["-"], "mno-sse4.1">, Group<m_x86_Features_Group>;
def msse4_2 : Flag<["-"], "msse4.2">, Group<m_x86_Features_Group>;
def mno_sse4_2 : Flag<["-"], "mno-sse4.2">, Group<m_x86_Features_Group>;
def msse4 : Flag<["-"], "msse4">, Alias<msse4_2>;
// -mno-sse4 turns off sse4.1 which has the effect of turning off everything
// later than 4.1. -msse4 turns on 4.2 which has the effect of turning on
// everything earlier than 4.2.
def mno_sse4 : Flag<["-"], "mno-sse4">, Alias<mno_sse4_1>;
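// Sketch of the resulting behavior, per the comment above:
//   -msse4     enables sse4.2 and, transitively, sse4.1/ssse3/sse3/sse2/sse
//   -mno-sse4  disables sse4.1 and, with it, sse4.2 and every later extension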
def msse4a : Flag<["-"], "msse4a">, Group<m_x86_Features_Group>;
def mno_sse4a : Flag<["-"], "mno-sse4a">, Group<m_x86_Features_Group>;
def mavx : Flag<["-"], "mavx">, Group<m_x86_Features_Group>;
def mno_avx : Flag<["-"], "mno-avx">, Group<m_x86_Features_Group>;
def mavx2 : Flag<["-"], "mavx2">, Group<m_x86_Features_Group>;
def mno_avx2 : Flag<["-"], "mno-avx2">, Group<m_x86_Features_Group>;
def mavx512f : Flag<["-"], "mavx512f">, Group<m_x86_Features_Group>;
def mno_avx512f : Flag<["-"], "mno-avx512f">, Group<m_x86_Features_Group>;
def mavx512bitalg : Flag<["-"], "mavx512bitalg">, Group<m_x86_Features_Group>;
def mno_avx512bitalg : Flag<["-"], "mno-avx512bitalg">, Group<m_x86_Features_Group>;
def mavx512bw : Flag<["-"], "mavx512bw">, Group<m_x86_Features_Group>;
def mno_avx512bw : Flag<["-"], "mno-avx512bw">, Group<m_x86_Features_Group>;
def mavx512cd : Flag<["-"], "mavx512cd">, Group<m_x86_Features_Group>;
def mno_avx512cd : Flag<["-"], "mno-avx512cd">, Group<m_x86_Features_Group>;
def mavx512dq : Flag<["-"], "mavx512dq">, Group<m_x86_Features_Group>;
def mno_avx512dq : Flag<["-"], "mno-avx512dq">, Group<m_x86_Features_Group>;
def mavx512er : Flag<["-"], "mavx512er">, Group<m_x86_Features_Group>;
def mno_avx512er : Flag<["-"], "mno-avx512er">, Group<m_x86_Features_Group>;
def mavx512ifma : Flag<["-"], "mavx512ifma">, Group<m_x86_Features_Group>;
def mno_avx512ifma : Flag<["-"], "mno-avx512ifma">, Group<m_x86_Features_Group>;
def mavx512pf : Flag<["-"], "mavx512pf">, Group<m_x86_Features_Group>;
def mno_avx512pf : Flag<["-"], "mno-avx512pf">, Group<m_x86_Features_Group>;
def mavx512vbmi : Flag<["-"], "mavx512vbmi">, Group<m_x86_Features_Group>;
def mno_avx512vbmi : Flag<["-"], "mno-avx512vbmi">, Group<m_x86_Features_Group>;
def mavx512vbmi2 : Flag<["-"], "mavx512vbmi2">, Group<m_x86_Features_Group>;
def mno_avx512vbmi2 : Flag<["-"], "mno-avx512vbmi2">, Group<m_x86_Features_Group>;
def mavx512vl : Flag<["-"], "mavx512vl">, Group<m_x86_Features_Group>;
def mno_avx512vl : Flag<["-"], "mno-avx512vl">, Group<m_x86_Features_Group>;
def mavx512vnni : Flag<["-"], "mavx512vnni">, Group<m_x86_Features_Group>;
def mno_avx512vnni : Flag<["-"], "mno-avx512vnni">, Group<m_x86_Features_Group>;
def mavx512vpopcntdq : Flag<["-"], "mavx512vpopcntdq">, Group<m_x86_Features_Group>;
def mno_avx512vpopcntdq : Flag<["-"], "mno-avx512vpopcntdq">, Group<m_x86_Features_Group>;
def madx : Flag<["-"], "madx">, Group<m_x86_Features_Group>;
def mno_adx : Flag<["-"], "mno-adx">, Group<m_x86_Features_Group>;
def maes : Flag<["-"], "maes">, Group<m_x86_Features_Group>;
def mno_aes : Flag<["-"], "mno-aes">, Group<m_x86_Features_Group>;
def mbmi : Flag<["-"], "mbmi">, Group<m_x86_Features_Group>;
def mno_bmi : Flag<["-"], "mno-bmi">, Group<m_x86_Features_Group>;
def mbmi2 : Flag<["-"], "mbmi2">, Group<m_x86_Features_Group>;
def mno_bmi2 : Flag<["-"], "mno-bmi2">, Group<m_x86_Features_Group>;
def mcldemote : Flag<["-"], "mcldemote">, Group<m_x86_Features_Group>;
def mno_cldemote : Flag<["-"], "mno-cldemote">, Group<m_x86_Features_Group>;
def mclflushopt : Flag<["-"], "mclflushopt">, Group<m_x86_Features_Group>;
def mno_clflushopt : Flag<["-"], "mno-clflushopt">, Group<m_x86_Features_Group>;
def mclwb : Flag<["-"], "mclwb">, Group<m_x86_Features_Group>;
def mno_clwb : Flag<["-"], "mno-clwb">, Group<m_x86_Features_Group>;
def mwbnoinvd : Flag<["-"], "mwbnoinvd">, Group<m_x86_Features_Group>;
def mno_wbnoinvd : Flag<["-"], "mno-wbnoinvd">, Group<m_x86_Features_Group>;
def mclzero : Flag<["-"], "mclzero">, Group<m_x86_Features_Group>;
def mno_clzero : Flag<["-"], "mno-clzero">, Group<m_x86_Features_Group>;
def mcx16 : Flag<["-"], "mcx16">, Group<m_x86_Features_Group>;
def mno_cx16 : Flag<["-"], "mno-cx16">, Group<m_x86_Features_Group>;
def mf16c : Flag<["-"], "mf16c">, Group<m_x86_Features_Group>;
def mno_f16c : Flag<["-"], "mno-f16c">, Group<m_x86_Features_Group>;
def mfma : Flag<["-"], "mfma">, Group<m_x86_Features_Group>;
def mno_fma : Flag<["-"], "mno-fma">, Group<m_x86_Features_Group>;
def mfma4 : Flag<["-"], "mfma4">, Group<m_x86_Features_Group>;
def mno_fma4 : Flag<["-"], "mno-fma4">, Group<m_x86_Features_Group>;
def mfsgsbase : Flag<["-"], "mfsgsbase">, Group<m_x86_Features_Group>;
def mno_fsgsbase : Flag<["-"], "mno-fsgsbase">, Group<m_x86_Features_Group>;
def mfxsr : Flag<["-"], "mfxsr">, Group<m_x86_Features_Group>;
def mno_fxsr : Flag<["-"], "mno-fxsr">, Group<m_x86_Features_Group>;
def minvpcid : Flag<["-"], "minvpcid">, Group<m_x86_Features_Group>;
def mno_invpcid : Flag<["-"], "mno-invpcid">, Group<m_x86_Features_Group>;
def mgfni : Flag<["-"], "mgfni">, Group<m_x86_Features_Group>;
def mno_gfni : Flag<["-"], "mno-gfni">, Group<m_x86_Features_Group>;
def mlwp : Flag<["-"], "mlwp">, Group<m_x86_Features_Group>;
def mno_lwp : Flag<["-"], "mno-lwp">, Group<m_x86_Features_Group>;
def mlzcnt : Flag<["-"], "mlzcnt">, Group<m_x86_Features_Group>;
def mno_lzcnt : Flag<["-"], "mno-lzcnt">, Group<m_x86_Features_Group>;
def mmovbe : Flag<["-"], "mmovbe">, Group<m_x86_Features_Group>;
def mno_movbe : Flag<["-"], "mno-movbe">, Group<m_x86_Features_Group>;
def mmovdiri : Flag<["-"], "mmovdiri">, Group<m_x86_Features_Group>;
def mno_movdiri : Flag<["-"], "mno-movdiri">, Group<m_x86_Features_Group>;
def mmovdir64b : Flag<["-"], "mmovdir64b">, Group<m_x86_Features_Group>;
def mno_movdir64b : Flag<["-"], "mno-movdir64b">, Group<m_x86_Features_Group>;
def mmpx : Flag<["-"], "mmpx">, Group<m_x86_Features_Group>;
def mno_mpx : Flag<["-"], "mno-mpx">, Group<m_x86_Features_Group>;
def mmwaitx : Flag<["-"], "mmwaitx">, Group<m_x86_Features_Group>;
def mno_mwaitx : Flag<["-"], "mno-mwaitx">, Group<m_x86_Features_Group>;
def mpku : Flag<["-"], "mpku">, Group<m_x86_Features_Group>;
def mno_pku : Flag<["-"], "mno-pku">, Group<m_x86_Features_Group>;
def mpclmul : Flag<["-"], "mpclmul">, Group<m_x86_Features_Group>;
def mno_pclmul : Flag<["-"], "mno-pclmul">, Group<m_x86_Features_Group>;
def mpconfig : Flag<["-"], "mpconfig">, Group<m_x86_Features_Group>;
def mno_pconfig : Flag<["-"], "mno-pconfig">, Group<m_x86_Features_Group>;
def mpopcnt : Flag<["-"], "mpopcnt">, Group<m_x86_Features_Group>;
def mno_popcnt : Flag<["-"], "mno-popcnt">, Group<m_x86_Features_Group>;
def mprefetchwt1 : Flag<["-"], "mprefetchwt1">, Group<m_x86_Features_Group>;
def mno_prefetchwt1 : Flag<["-"], "mno-prefetchwt1">, Group<m_x86_Features_Group>;
def mprfchw : Flag<["-"], "mprfchw">, Group<m_x86_Features_Group>;
def mno_prfchw : Flag<["-"], "mno-prfchw">, Group<m_x86_Features_Group>;
def mptwrite : Flag<["-"], "mptwrite">, Group<m_x86_Features_Group>;
def mno_ptwrite : Flag<["-"], "mno-ptwrite">, Group<m_x86_Features_Group>;
def mrdpid : Flag<["-"], "mrdpid">, Group<m_x86_Features_Group>;
def mno_rdpid : Flag<["-"], "mno-rdpid">, Group<m_x86_Features_Group>;
def mrdrnd : Flag<["-"], "mrdrnd">, Group<m_x86_Features_Group>;
def mno_rdrnd : Flag<["-"], "mno-rdrnd">, Group<m_x86_Features_Group>;
def mrtm : Flag<["-"], "mrtm">, Group<m_x86_Features_Group>;
def mno_rtm : Flag<["-"], "mno-rtm">, Group<m_x86_Features_Group>;
def mrdseed : Flag<["-"], "mrdseed">, Group<m_x86_Features_Group>;
def mno_rdseed : Flag<["-"], "mno-rdseed">, Group<m_x86_Features_Group>;
def msahf : Flag<["-"], "msahf">, Group<m_x86_Features_Group>;
def mno_sahf : Flag<["-"], "mno-sahf">, Group<m_x86_Features_Group>;
def msgx : Flag<["-"], "msgx">, Group<m_x86_Features_Group>;
def mno_sgx : Flag<["-"], "mno-sgx">, Group<m_x86_Features_Group>;
def msha : Flag<["-"], "msha">, Group<m_x86_Features_Group>;
def mno_sha : Flag<["-"], "mno-sha">, Group<m_x86_Features_Group>;
def mtbm : Flag<["-"], "mtbm">, Group<m_x86_Features_Group>;
def mno_tbm : Flag<["-"], "mno-tbm">, Group<m_x86_Features_Group>;
def mvaes : Flag<["-"], "mvaes">, Group<m_x86_Features_Group>;
def mno_vaes : Flag<["-"], "mno-vaes">, Group<m_x86_Features_Group>;
def mvpclmulqdq : Flag<["-"], "mvpclmulqdq">, Group<m_x86_Features_Group>;
def mno_vpclmulqdq : Flag<["-"], "mno-vpclmulqdq">, Group<m_x86_Features_Group>;
def mwaitpkg : Flag<["-"], "mwaitpkg">, Group<m_x86_Features_Group>;
def mno_waitpkg : Flag<["-"], "mno-waitpkg">, Group<m_x86_Features_Group>;
def mxop : Flag<["-"], "mxop">, Group<m_x86_Features_Group>;
def mno_xop : Flag<["-"], "mno-xop">, Group<m_x86_Features_Group>;
def mxsave : Flag<["-"], "mxsave">, Group<m_x86_Features_Group>;
def mno_xsave : Flag<["-"], "mno-xsave">, Group<m_x86_Features_Group>;
def mxsavec : Flag<["-"], "mxsavec">, Group<m_x86_Features_Group>;
def mno_xsavec : Flag<["-"], "mno-xsavec">, Group<m_x86_Features_Group>;
def mxsaveopt : Flag<["-"], "mxsaveopt">, Group<m_x86_Features_Group>;
def mno_xsaveopt : Flag<["-"], "mno-xsaveopt">, Group<m_x86_Features_Group>;
def mxsaves : Flag<["-"], "mxsaves">, Group<m_x86_Features_Group>;
def mno_xsaves : Flag<["-"], "mno-xsaves">, Group<m_x86_Features_Group>;
def mshstk : Flag<["-"], "mshstk">, Group<m_x86_Features_Group>;
def mno_shstk : Flag<["-"], "mno-shstk">, Group<m_x86_Features_Group>;
def mretpoline : Flag<["-"], "mretpoline">, Group<m_x86_Features_Group>;
def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_x86_Features_Group>;
def mretpoline_external_thunk : Flag<["-"], "mretpoline-external-thunk">, Group<m_x86_Features_Group>;
def mno_retpoline_external_thunk : Flag<["-"], "mno-retpoline-external-thunk">, Group<m_x86_Features_Group>;
// These are legacy user-facing driver-level option spellings. They are always
// aliases for options that are spelled using the more common Unix / GNU flag
// style of double-dash and equals-joined flags.
def gcc_toolchain_legacy_spelling : Separate<["-"], "gcc-toolchain">, Alias<gcc_toolchain>;
def target_legacy_spelling : Separate<["-"], "target">, Alias<target>;
// Special internal option to handle -Xlinker --no-demangle.
def Z_Xlinker__no_demangle : Flag<["-"], "Z-Xlinker-no-demangle">,
Flags<[Unsupported, NoArgumentUnused]>;
// Special internal option to allow forwarding arbitrary arguments to linker.
def Zlinker_input : Separate<["-"], "Zlinker-input">,
Flags<[Unsupported, NoArgumentUnused]>;
// Reserved library options.
def Z_reserved_lib_stdcxx : Flag<["-"], "Z-reserved-lib-stdc++">,
Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
def Z_reserved_lib_cckext : Flag<["-"], "Z-reserved-lib-cckext">,
Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
// Ignored options
// FIXME: multiclasses produce suffixes, not prefixes. This is fine for now
// since it is only used in ignored options.
multiclass BooleanFFlag<string name> {
def _f : Flag<["-"], "f"#name>;
def _fno : Flag<["-"], "fno-"#name>;
}
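// A minimal expansion sketch, assuming standard TableGen defm naming: a use
// such as "defm web : BooleanFFlag<"web">" below yields the records "web_f"
// (flag "-fweb") and "web_fno" (flag "-fno-web"); the _f/_fno markers land as
// suffixes on the defm name, which is exactly what the FIXME above notes.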
defm : BooleanFFlag<"keep-inline-functions">, Group<clang_ignored_gcc_optimization_f_Group>;
def fprofile_dir : Joined<["-"], "fprofile-dir=">, Group<f_Group>;
def fuse_ld_EQ : Joined<["-"], "fuse-ld=">, Group<f_Group>, Flags<[CoreOption]>;
defm align_labels : BooleanFFlag<"align-labels">, Group<clang_ignored_gcc_optimization_f_Group>;
def falign_labels_EQ : Joined<["-"], "falign-labels=">, Group<clang_ignored_gcc_optimization_f_Group>;
defm align_loops : BooleanFFlag<"align-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
def falign_loops_EQ : Joined<["-"], "falign-loops=">, Group<clang_ignored_gcc_optimization_f_Group>;
defm align_jumps : BooleanFFlag<"align-jumps">, Group<clang_ignored_gcc_optimization_f_Group>;
def falign_jumps_EQ : Joined<["-"], "falign-jumps=">, Group<clang_ignored_gcc_optimization_f_Group>;
// FIXME: This option should be supported and wired up to our diagnostics, but
// ignore it for now to avoid breaking builds that use it.
def fdiagnostics_show_location_EQ : Joined<["-"], "fdiagnostics-show-location=">, Group<clang_ignored_f_Group>;
defm fcheck_new : BooleanFFlag<"check-new">, Group<clang_ignored_f_Group>;
defm caller_saves : BooleanFFlag<"caller-saves">, Group<clang_ignored_gcc_optimization_f_Group>;
defm reorder_blocks : BooleanFFlag<"reorder-blocks">, Group<clang_ignored_gcc_optimization_f_Group>;
defm eliminate_unused_debug_types : BooleanFFlag<"eliminate-unused-debug-types">, Group<clang_ignored_f_Group>;
defm branch_count_reg : BooleanFFlag<"branch-count-reg">, Group<clang_ignored_gcc_optimization_f_Group>;
defm default_inline : BooleanFFlag<"default-inline">, Group<clang_ignored_gcc_optimization_f_Group>;
defm fat_lto_objects : BooleanFFlag<"fat-lto-objects">, Group<clang_ignored_gcc_optimization_f_Group>;
defm float_store : BooleanFFlag<"float-store">, Group<clang_ignored_gcc_optimization_f_Group>;
defm friend_injection : BooleanFFlag<"friend-injection">, Group<clang_ignored_f_Group>;
defm function_attribute_list : BooleanFFlag<"function-attribute-list">, Group<clang_ignored_f_Group>;
defm gcse : BooleanFFlag<"gcse">, Group<clang_ignored_gcc_optimization_f_Group>;
defm gcse_after_reload: BooleanFFlag<"gcse-after-reload">, Group<clang_ignored_gcc_optimization_f_Group>;
defm gcse_las: BooleanFFlag<"gcse-las">, Group<clang_ignored_gcc_optimization_f_Group>;
defm gcse_sm: BooleanFFlag<"gcse-sm">, Group<clang_ignored_gcc_optimization_f_Group>;
defm gnu : BooleanFFlag<"gnu">, Group<clang_ignored_f_Group>;
defm implicit_templates : BooleanFFlag<"implicit-templates">, Group<clang_ignored_f_Group>;
defm implement_inlines : BooleanFFlag<"implement-inlines">, Group<clang_ignored_f_Group>;
defm merge_constants : BooleanFFlag<"merge-constants">, Group<clang_ignored_gcc_optimization_f_Group>;
defm modulo_sched : BooleanFFlag<"modulo-sched">, Group<clang_ignored_gcc_optimization_f_Group>;
defm modulo_sched_allow_regmoves : BooleanFFlag<"modulo-sched-allow-regmoves">,
Group<clang_ignored_gcc_optimization_f_Group>;
defm inline_functions_called_once : BooleanFFlag<"inline-functions-called-once">,
Group<clang_ignored_gcc_optimization_f_Group>;
def finline_limit_EQ : Joined<["-"], "finline-limit=">, Group<clang_ignored_gcc_optimization_f_Group>;
defm finline_limit : BooleanFFlag<"inline-limit">, Group<clang_ignored_gcc_optimization_f_Group>;
defm inline_small_functions : BooleanFFlag<"inline-small-functions">,
Group<clang_ignored_gcc_optimization_f_Group>;
defm ipa_cp : BooleanFFlag<"ipa-cp">,
Group<clang_ignored_gcc_optimization_f_Group>;
defm ivopts : BooleanFFlag<"ivopts">, Group<clang_ignored_gcc_optimization_f_Group>;
defm non_call_exceptions : BooleanFFlag<"non-call-exceptions">, Group<clang_ignored_f_Group>;
defm peel_loops : BooleanFFlag<"peel-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
defm permissive : BooleanFFlag<"permissive">, Group<clang_ignored_f_Group>;
defm prefetch_loop_arrays : BooleanFFlag<"prefetch-loop-arrays">, Group<clang_ignored_gcc_optimization_f_Group>;
defm printf : BooleanFFlag<"printf">, Group<clang_ignored_f_Group>;
defm profile : BooleanFFlag<"profile">, Group<clang_ignored_f_Group>;
defm profile_correction : BooleanFFlag<"profile-correction">, Group<clang_ignored_gcc_optimization_f_Group>;
defm profile_generate_sampling : BooleanFFlag<"profile-generate-sampling">, Group<clang_ignored_f_Group>;
defm profile_reusedist : BooleanFFlag<"profile-reusedist">, Group<clang_ignored_f_Group>;
defm profile_values : BooleanFFlag<"profile-values">, Group<clang_ignored_gcc_optimization_f_Group>;
defm regs_graph : BooleanFFlag<"regs-graph">, Group<clang_ignored_f_Group>;
defm rename_registers : BooleanFFlag<"rename-registers">, Group<clang_ignored_gcc_optimization_f_Group>;
defm ripa : BooleanFFlag<"ripa">, Group<clang_ignored_f_Group>;
defm rounding_math : BooleanFFlag<"rounding-math">, Group<clang_ignored_gcc_optimization_f_Group>;
defm schedule_insns : BooleanFFlag<"schedule-insns">, Group<clang_ignored_gcc_optimization_f_Group>;
defm schedule_insns2 : BooleanFFlag<"schedule-insns2">, Group<clang_ignored_gcc_optimization_f_Group>;
defm see : BooleanFFlag<"see">, Group<clang_ignored_f_Group>;
defm signaling_nans : BooleanFFlag<"signaling-nans">, Group<clang_ignored_gcc_optimization_f_Group>;
defm single_precision_constant : BooleanFFlag<"single-precision-constant">,
Group<clang_ignored_gcc_optimization_f_Group>;
defm spec_constr_count : BooleanFFlag<"spec-constr-count">, Group<clang_ignored_f_Group>;
defm stack_check : BooleanFFlag<"stack-check">, Group<clang_ignored_f_Group>;
defm strength_reduce :
BooleanFFlag<"strength-reduce">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tls_model : BooleanFFlag<"tls-model">, Group<clang_ignored_f_Group>;
defm tracer : BooleanFFlag<"tracer">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_dce : BooleanFFlag<"tree-dce">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_loop_im : BooleanFFlag<"tree_loop_im">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_loop_ivcanon : BooleanFFlag<"tree_loop_ivcanon">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_loop_linear : BooleanFFlag<"tree_loop_linear">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_salias : BooleanFFlag<"tree-salias">, Group<clang_ignored_f_Group>;
defm tree_ter : BooleanFFlag<"tree-ter">, Group<clang_ignored_gcc_optimization_f_Group>;
defm tree_vectorizer_verbose : BooleanFFlag<"tree-vectorizer-verbose">, Group<clang_ignored_f_Group>;
defm tree_vrp : BooleanFFlag<"tree-vrp">, Group<clang_ignored_gcc_optimization_f_Group>;
defm unroll_all_loops : BooleanFFlag<"unroll-all-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
defm unsafe_loop_optimizations : BooleanFFlag<"unsafe-loop-optimizations">,
Group<clang_ignored_gcc_optimization_f_Group>;
defm unswitch_loops : BooleanFFlag<"unswitch-loops">, Group<clang_ignored_gcc_optimization_f_Group>;
defm use_linker_plugin : BooleanFFlag<"use-linker-plugin">, Group<clang_ignored_gcc_optimization_f_Group>;
defm vect_cost_model : BooleanFFlag<"vect-cost-model">, Group<clang_ignored_gcc_optimization_f_Group>;
defm variable_expansion_in_unroller : BooleanFFlag<"variable-expansion-in-unroller">,
Group<clang_ignored_gcc_optimization_f_Group>;
defm web : BooleanFFlag<"web">, Group<clang_ignored_gcc_optimization_f_Group>;
defm whole_program : BooleanFFlag<"whole-program">, Group<clang_ignored_gcc_optimization_f_Group>;
defm devirtualize : BooleanFFlag<"devirtualize">, Group<clang_ignored_gcc_optimization_f_Group>;
defm devirtualize_speculatively : BooleanFFlag<"devirtualize-speculatively">,
Group<clang_ignored_gcc_optimization_f_Group>;
// Generic gfortran options.
def A_DASH : Joined<["-"], "A-">, Group<gfortran_Group>;
def J : JoinedOrSeparate<["-"], "J">, Flags<[RenderJoined]>, Group<gfortran_Group>;
def cpp : Flag<["-"], "cpp">, Group<gfortran_Group>;
def nocpp : Flag<["-"], "nocpp">, Group<gfortran_Group>;
def static_libgfortran : Flag<["-"], "static-libgfortran">, Group<gfortran_Group>;
// "f" options with values for gfortran.
def fblas_matmul_limit_EQ : Joined<["-"], "fblas-matmul-limit=">, Group<gfortran_Group>;
def fcheck_EQ : Joined<["-"], "fcheck=">, Group<gfortran_Group>;
def fcoarray_EQ : Joined<["-"], "fcoarray=">, Group<gfortran_Group>;
def fconvert_EQ : Joined<["-"], "fconvert=">, Group<gfortran_Group>;
def ffixed_line_length_VALUE : Joined<["-"], "ffixed-line-length-">, Group<gfortran_Group>;
def ffpe_trap_EQ : Joined<["-"], "ffpe-trap=">, Group<gfortran_Group>;
def ffree_line_length_VALUE : Joined<["-"], "ffree-line-length-">, Group<gfortran_Group>;
def finit_character_EQ : Joined<["-"], "finit-character=">, Group<gfortran_Group>;
def finit_integer_EQ : Joined<["-"], "finit-integer=">, Group<gfortran_Group>;
def finit_logical_EQ : Joined<["-"], "finit-logical=">, Group<gfortran_Group>;
def finit_real_EQ : Joined<["-"], "finit-real=">, Group<gfortran_Group>;
def fmax_array_constructor_EQ : Joined<["-"], "fmax-array-constructor=">, Group<gfortran_Group>;
def fmax_errors_EQ : Joined<["-"], "fmax-errors=">, Group<gfortran_Group>;
def fmax_stack_var_size_EQ : Joined<["-"], "fmax-stack-var-size=">, Group<gfortran_Group>;
def fmax_subrecord_length_EQ : Joined<["-"], "fmax-subrecord-length=">, Group<gfortran_Group>;
def frecord_marker_EQ : Joined<["-"], "frecord-marker=">, Group<gfortran_Group>;
// "f" flags for gfortran.
defm aggressive_function_elimination : BooleanFFlag<"aggressive-function-elimination">, Group<gfortran_Group>;
defm align_commons : BooleanFFlag<"align-commons">, Group<gfortran_Group>;
defm all_intrinsics : BooleanFFlag<"all-intrinsics">, Group<gfortran_Group>;
defm automatic : BooleanFFlag<"automatic">, Group<gfortran_Group>;
defm backslash : BooleanFFlag<"backslash">, Group<gfortran_Group>;
defm backtrace : BooleanFFlag<"backtrace">, Group<gfortran_Group>;
defm bounds_check : BooleanFFlag<"bounds-check">, Group<gfortran_Group>;
defm check_array_temporaries : BooleanFFlag<"check-array-temporaries">, Group<gfortran_Group>;
defm cray_pointer : BooleanFFlag<"cray-pointer">, Group<gfortran_Group>;
defm d_lines_as_code : BooleanFFlag<"d-lines-as-code">, Group<gfortran_Group>;
defm d_lines_as_comments : BooleanFFlag<"d-lines-as-comments">, Group<gfortran_Group>;
defm default_double_8 : BooleanFFlag<"default-double-8">, Group<gfortran_Group>;
defm default_integer_8 : BooleanFFlag<"default-integer-8">, Group<gfortran_Group>;
defm default_real_8 : BooleanFFlag<"default-real-8">, Group<gfortran_Group>;
defm dollar_ok : BooleanFFlag<"dollar-ok">, Group<gfortran_Group>;
defm dump_fortran_optimized : BooleanFFlag<"dump-fortran-optimized">, Group<gfortran_Group>;
defm dump_fortran_original : BooleanFFlag<"dump-fortran-original">, Group<gfortran_Group>;
defm dump_parse_tree : BooleanFFlag<"dump-parse-tree">, Group<gfortran_Group>;
defm external_blas : BooleanFFlag<"external-blas">, Group<gfortran_Group>;
defm f2c : BooleanFFlag<"f2c">, Group<gfortran_Group>;
defm fixed_form : BooleanFFlag<"fixed-form">, Group<gfortran_Group>;
defm free_form : BooleanFFlag<"free-form">, Group<gfortran_Group>;
defm frontend_optimize : BooleanFFlag<"frontend-optimize">, Group<gfortran_Group>;
defm implicit_none : BooleanFFlag<"implicit-none">, Group<gfortran_Group>;
defm init_local_zero : BooleanFFlag<"init-local-zero">, Group<gfortran_Group>;
defm integer_4_integer_8 : BooleanFFlag<"integer-4-integer-8">, Group<gfortran_Group>;
defm intrinsic_modules_path : BooleanFFlag<"intrinsic-modules-path">, Group<gfortran_Group>;
defm max_identifier_length : BooleanFFlag<"max-identifier-length">, Group<gfortran_Group>;
defm module_private : BooleanFFlag<"module-private">, Group<gfortran_Group>;
defm pack_derived : BooleanFFlag<"pack-derived">, Group<gfortran_Group>;
defm protect_parens : BooleanFFlag<"protect-parens">, Group<gfortran_Group>;
defm range_check : BooleanFFlag<"range-check">, Group<gfortran_Group>;
defm real_4_real_10 : BooleanFFlag<"real-4-real-10">, Group<gfortran_Group>;
defm real_4_real_16 : BooleanFFlag<"real-4-real-16">, Group<gfortran_Group>;
defm real_4_real_8 : BooleanFFlag<"real-4-real-8">, Group<gfortran_Group>;
defm real_8_real_10 : BooleanFFlag<"real-8-real-10">, Group<gfortran_Group>;
defm real_8_real_16 : BooleanFFlag<"real-8-real-16">, Group<gfortran_Group>;
defm real_8_real_4 : BooleanFFlag<"real-8-real-4">, Group<gfortran_Group>;
defm realloc_lhs : BooleanFFlag<"realloc-lhs">, Group<gfortran_Group>;
defm recursive : BooleanFFlag<"recursive">, Group<gfortran_Group>;
defm repack_arrays : BooleanFFlag<"repack-arrays">, Group<gfortran_Group>;
defm second_underscore : BooleanFFlag<"second-underscore">, Group<gfortran_Group>;
defm sign_zero : BooleanFFlag<"sign-zero">, Group<gfortran_Group>;
defm stack_arrays : BooleanFFlag<"stack-arrays">, Group<gfortran_Group>;
defm underscoring : BooleanFFlag<"underscoring">, Group<gfortran_Group>;
defm whole_file : BooleanFFlag<"whole-file">, Group<gfortran_Group>;
include "CC1Options.td"
include "CLCompatOptions.td"
Index: projects/clang700-import/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/include/clang/Sema/Sema.h (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/include/clang/Sema/Sema.h (revision 340125)
@@ -1,10823 +1,10825 @@
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
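// A minimal usage sketch (illustration only; "FID" is an assumed FileID):
//   FileNullabilityMap NullMap;
//   FileNullability &FN = NullMap[FID]; // first lookup populates the cache
//   FN.SawTypeNullability = true;       // mutates the cached entry in place
//   (void)NullMap[FID];                 // repeat lookup hits the cache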
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
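// A minimal usage sketch (illustration only; "PragmaLoc" and the values are
// assumed), mirroring roughly what #pragma pack(push, label, 8) would do:
//   PragmaStack<unsigned> PackStack(/*Default=*/0);
//   PackStack.Act(PragmaLoc, PSK_Push_Set, "label", 8u);
//   PackStack.Act(PragmaLoc, PSK_Pop, "label", 0u); // restores the old value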
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This represents the stack of attributes that were pushed by
/// \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a list of either DeclRefExprs or MemberExprs
/// that contain a reference to a variable (constant) that may or may not
/// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue
/// and discarded value conversions have been applied to all subexpressions
/// of the enclosing full expression. This is cleared at the end of each
/// full expression.
llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of the translation unit.
///
/// This list contains class members and the locations of delete-expressions
/// for which we could not yet prove whether they mismatch the new-expression
/// used in the field's initializer.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and about which we must warn if they remain unused. Only contains the
/// first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedExceptionSpecChecks;
/// All the members seen during a class definition which were both
/// explicitly defaulted and had explicitly-specified exception
/// specifications, along with the function type containing their
/// user-specified exception specification. Those exception specifications
/// were overridden with the default specifications, but we still need to
/// check whether they are compatible with the default specification, and
/// we can't do that until the nested set of class definitions is complete.
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2>
DelayedDefaultedMemberExceptionSpecs;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
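// A minimal pairing sketch (illustration only; "S" is an assumed Sema &, and
// the pool constructor taking a parent pool is assumed from
// DelayedDiagnostic.h):
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   DelayedDiagnosticsState State = S.DelayedDiagnostics.push(Pool);
//   // ... parse; access/deprecation diagnostics collect in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);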
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
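// A minimal usage sketch (illustration only; "SemaRef" and "RD" are assumed):
//   {
//     ContextRAII SavedContext(SemaRef, RD); // enter RD as the decl context
//     // ... build declarations inside RD ...
//   } // destructor pops back to the previously saved context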
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
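// A minimal usage sketch (illustration only; "Ctor" is an assumed
// CXXConstructorDecl being given a synthesized body):
//   SynthesizedFunctionScope Scope(*this, Ctor);
//   Scope.addContextNote(CurrentLocation); // optional "while defining" note
//   // ... synthesize the body and attach it to Ctor ...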
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; each may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to look up file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
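// Illustrative mappings for the contexts above (examples assumed from the
// cited C++ rules):
//   sizeof(e)                  -> Unevaluated
//   case kValue:               -> ConstantEvaluated
//   g(x); in a function body   -> PotentiallyEvaluated
//   void h(int n = make());    -> default argument: PotentiallyEvaluatedIfUsed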
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
/// Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
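// A minimal usage sketch (illustration only; "MD" is an assumed
// CXXMethodDecl*):
//   SpecialMemberOverloadResult R(MD); // kind derived from MD->isDeleted()
//   if (R.getKind() == SpecialMemberOverloadResult::Success) {
//     CXXMethodDecl *Chosen = R.getMethod();
//     // ...
//   }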
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedButUsed - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
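// Illustrative RAII use (sketch; SemaRef stands for a hypothetical Sema&):
//   {
//     FPContractStateRAII FPGuard(SemaRef); // FPFeatures saved here
//     // ... act on the compound statement ...
//   } // FPFeatures restored on scope exit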
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
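// Illustrative emission (sketch; the diagnostic ID and arguments are only
// examples):
//   Diag(Loc, diag::err_incomplete_type) << T;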
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
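// Illustrative call (sketch; Loc is a hypothetical SourceLocation):
//   SmallVector<QualType, 2> Params = {Context.IntTy, Context.IntTy};
//   FunctionProtoType::ExtProtoInfo EPI;
//   QualType Fn = BuildFunctionType(Context.VoidTy, Params, Loc,
//                                   DeclarationName(), EPI);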
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
TypeSourceInfo *ReturnTypeInfo);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange(); }
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
private:
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
clang::Module *Module = nullptr;
bool ModuleInterface = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M) { return VisibleModules.isVisible(M); }
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
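// Illustrative pairing with diagnoseMissingImport (sketch; UseLoc is
// hypothetical):
//   NamedDecl *Suggested = nullptr;
//   if (!hasVisibleDefinition(D, &Suggested) && Suggested)
//     diagnoseMissingImport(UseLoc, Suggested,
//                           MissingImportKind::Definition);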
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
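// Illustrative use (sketch; the diagnostic ID is only an example):
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type))
//     return ExprError();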
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
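// Illustrative construction inside ClassifyName (sketch; TD stands for a
// hypothetical TemplateDecl* found by lookup):
//   return NameClassification::TypeTemplate(TemplateName(TD));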
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification
ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
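// Illustrative parser-side pairing of the two routines above (sketch;
// Result, LessLoc, and GreaterLoc are hypothetical):
//   bool Dependent;
//   if (mightBeIntendedToBeTemplateName(Result, Dependent))
//     diagnoseExprIntendedAsTemplateName(getCurScope(), Result,
//                                        LessLoc, GreaterLoc);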
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
+ bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
+ QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
Partition, ///< 'module partition X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
ModuleIdPath Path);
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, we actually parse the body and reject / error out in case of
/// a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
IdentifierInfo *Platform,
bool Implicit,
VersionTuple Introduced,
VersionTuple Deprecated,
VersionTuple Obsoleted,
bool IsUnavailable,
StringRef Message,
bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
CommonAttr *mergeCommonAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
/// Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf ///< Condition in a constexpr if statement.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
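// Illustrative call for a case label (sketch; CaseExpr is hypothetical):
//   llvm::APSInt Value;
//   ExprResult Converted = CheckConvertedConstantExpression(
//       CaseExpr, Context.IntTy, Value, CCEK_CaseValue);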
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
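// Usage sketch (illustrative; the diagnostic ID is a placeholder and only one
// override is shown): a caller typically defines a local diagnoser and hands
// it to PerformContextualImplicitConversion, declared just below.
//   struct CondDiagnoser : Sema::ICEConvertDiagnoser {
//     CondDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     Sema::SemaDiagnosticBuilder
//     diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, diag::err_placeholder_not_integer) << T;
//     }
//     // ...the remaining pure virtuals from ContextualImplicitConverter
//     // would be overridden in the same way...
//   } Diagnoser;
//   ExprResult Res =
//       SemaRef.PerformContextualImplicitConversion(Loc, CondExpr, Diagnoser);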
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
void AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = false,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
///
/// \param AllowTopLevelCond Whether to allow the result to be the
/// complete top-level condition.
std::pair<Expr *, std::string>
findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base, Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up the implicit 'self' parameter of an Objective-C method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up any declaration with any name.
LookupAnyName
};
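// Usage sketch (illustrative; SemaRef, II, NameLoc and S are placeholder
// names): unqualified ordinary lookup of an identifier.
//   LookupResult R(SemaRef, DeclarationName(II), NameLoc,
//                  Sema::LookupOrdinaryName);
//   if (SemaRef.LookupName(R, S) && R.isSingleResult())
//     NamedDecl *Found = R.getFoundDecl();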
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
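// Usage sketch (illustrative; SemaRef, S, R and ArgTys are placeholder
// names): a caller dispatches on the result of LookupLiteralOperator,
// declared further below.
//   switch (SemaRef.LookupLiteralOperator(S, R, ArgTys, /*AllowRaw=*/true,
//                                         /*AllowTemplate=*/true,
//                                         /*AllowStringTemplate=*/false,
//                                         /*DiagnoseMissing=*/true)) {
//   case Sema::LOLR_Cooked: /* build the operand, then the call */ break;
//   case Sema::LOLR_Raw:    /* pass the token spelling as a string */ break;
//   // ...remaining cases...
//   }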
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
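// Usage sketch (illustrative): a filter that accepts only rebuilt
// expressions of pointer type; other correction combinations are retried
// until one passes or all are exhausted.
//   ExprResult Fixed = SemaRef.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
//         if (Rebuilt->getType()->isPointerType())
//           return Rebuilt;
//         return ExprError();
//       });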
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType &T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Check whether a nullability type specifier can be added to the given
/// type.
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param nullabilityLoc The location of the nullability specifier.
///
/// \param isContextSensitive Whether this nullability specifier was
/// written as a context-sensitive keyword (in an Objective-C
/// method) or an Objective-C property attribute, rather than as an
/// underscored type specifier.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability,
SourceLocation nullabilityLoc,
bool isContextSensitive,
bool allowArrayTypes);
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other of a
/// user-declared setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class, and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See the description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for a
/// given selector. It checks the desired kind first; if none is found and the
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(ActOnFinishFullExpr(Arg, CC).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
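// Usage sketch (illustrative; ParseBody is a placeholder): the RAII form
// keeps ActOnStartOfCompoundStmt/ActOnFinishOfCompoundStmt balanced even on
// early returns.
//   {
//     Sema::CompoundScopeRAII BodyScope(SemaRef);
//     StmtResult Body = ParseBody();
//     if (Body.isInvalid())
//       return StmtError(); // the compound scope is still popped here
//   }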
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
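// Usage sketch (illustrative; BuildBody is a placeholder): pop the function
// scope automatically on error paths only.
//   Sema::FunctionScopeRAII FuncScope(SemaRef);
//   if (BuildBody().isInvalid())
//     return ExprError();   // destructor pops the function scope
//   FuncScope.disable();    // success: the scope stays pushed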
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
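// Usage sketch (illustrative): BFRK_Check allows probing whether a for-range
// statement could be built before committing to it.
//   StmtResult Probe = SemaRef.ActOnCXXForRangeStmt(
//       S, ForLoc, /*CoawaitLoc=*/SourceLocation(), LoopVar, ColonLoc,
//       Collection, RParenLoc, Sema::BFRK_Check);
//   // if Probe succeeded, repeat with Sema::BFRK_Build for the real stmt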
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
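// Usage sketch (illustrative): the enumerators are bit flags meant to be
// combined; CES_Default above is exactly
// CES_AllowParameters | CES_AllowDifferentTypes. For example:
//   VarDecl *NRVOCandidate = SemaRef.getCopyElisionCandidate(
//       ReturnType, RetValExp, Sema::CES_Default);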
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
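// Usage sketch (illustrative; BuildOperand is a placeholder): pushes and pops
// must pair, e.g. when building an unevaluated operand such as the operand of
// sizeof.
//   SemaRef.PushExpressionEvaluationContext(
//       Sema::ExpressionEvaluationContext::Unevaluated);
//   ExprResult Operand = BuildOperand();
//   SemaRef.PopExpressionEvaluationContext();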
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
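// Usage sketch (illustrative; VD and NameLoc are placeholder names): the
// short overload performs an implicit capture at the point of use and
// returns true on failure.
//   if (SemaRef.tryCaptureVariable(VD, NameLoc))
//     return ExprError();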
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
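// Usage sketch (illustrative; the diagnostic ID is a placeholder): suggest
// calling a zero-argument overload, accepting only boolean results.
//   static bool IsBoolish(QualType T) { return T->isBooleanType(); }
//   ...
//   if (SemaRef.tryToRecoverWithCall(E, SemaRef.PDiag(diag::err_placeholder),
//                                    /*ForceComplain=*/false, &IsBoolish))
//     return E; // recovery attempted; E may now be a call or be invalid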
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
std::unique_ptr<CorrectionCandidateCallback> CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
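// For illustration (hypothetical user code), the extra arguments let member
// access be retried with the other operator:
//
//   struct S { int x; };
//   void f(S *p) { (void)p.x; }   // '.' retried as '->' to recover: p->x
//
// The same machinery helps when a class defines a custom operator->.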
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
SourceLocation LParenLoc,
ArrayRef<Expr *> Arg,
SourceLocation RParenLoc,
Expr *Config = nullptr,
bool IsExecConfig = false);
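// Handles the CUDA kernel-launch configuration in 'kernel<<<grid, block>>>(...)';
// LLLLoc and GGGLoc are the locations of the '<<<' and '>>>' tokens.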
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an AltiVec or OpenCL vector literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
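// For example (hypothetical type S), '__builtin_offsetof(S, a.b[2])' reaches
// ActOnBuiltinOffsetOf as three components:
//   { isBrackets = false, U.IdentInfo = "a" }
//   { isBrackets = false, U.IdentInfo = "b" }
//   { isBrackets = true,  U.E = <expr 2>    }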
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
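// A sketch of the Microsoft extension these results classify (hypothetical
// user code):
//
//   __if_exists(N::value)       { int x = N::value; }   // IER_Exists
//   __if_not_exists(N::missing) { /* branch taken */ }  // IER_DoesNotExist
//
// Inside a template, a name that depends on a template parameter yields
// IER_Dependent and the check is deferred to instantiation.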
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then comes throw(collected exceptions).
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
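// A hedged sketch of how the collected specification evolves (hypothetical
// user code): for the implicit default constructor of
//
//   struct A { A() noexcept; };
//   struct B { B() throw(int); };
//   struct C { A a; B b; };   // C::C() sees noexcept, then throw(int)
//
// CalledDecl() widens the computed result from noexcept toward throw(int),
// so C's constructor ends up with the least restrictive specification seen.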
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
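// Examples of the C++1z forms these handle (user-code view):
//
//   template <typename... Ts> auto sum(Ts... ts) {
//     return (ts + ...);    // unary right fold
//   }
//   template <typename... Ts> bool all(Ts... ts) {
//     return (... && ts);   // unary left fold; empty pack yields 'true'
//   }
//
// BuildEmptyCXXFoldExpr supplies the value for an empty pack, which exists
// only for the &&, || and comma operators.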
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
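// Speculative-capture sketch (hypothetical user code):
//
//   struct S {
//     void mem();
//     auto f() {
//       return [=] {                       // enclosing lambda
//         return [=](auto x) { mem(); };   // nested generic lambda
//       };
//     }
//   };
//
// The enclosing lambda may have to capture 'this' before the nested generic
// lambda is instantiated; FunctionScopeIndexToStopAt bounds that walk.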
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Expr *ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
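// For illustration (hypothetical user code):
//
//   struct T { static void *operator new(size_t); };
//   T *a = new T;     // AFS_Both: class scope first, then the global scope
//   T *b = ::new T;   // AFS_Global: class-scope overloads are skipped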
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr) {
return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
: SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue = false,
bool IsConstexpr = false,
bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates an info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
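// For example (hypothetical user code), parsing the specifier in 'A::B::x'
// produces one NestedNameSpecInfo per 'identifier::' step:
//   { Identifier = "A", IdentifierLoc = <loc of A>, CCLoc = <first '::'> }
//   { Identifier = "B", IdentifierLoc = <loc of B>, CCLoc = <second '::'> }
// ObjectType is only set while parsing a member access such as 'obj.A::x'.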
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in that case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
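// Init-capture forms this analyzes (user-code view, hypothetical):
//
//   auto p = std::make_unique<int>(42);
//   auto byCopy = [x = *p] { return x; };            // copy-init, DirectInit=false
//   auto byMove = [q = std::move(p)] { return *q; };
//   auto direct = [n{3}] { return n; };              // list-init, DirectInit=true
//
// A by-reference init-capture such as '[&r = *p]' skips the
// lvalue-to-rvalue conversion mentioned above.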
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; IR generation
/// synthesizes the real body of the function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
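// Boxed-expression forms this accepts (illustrative Objective-C user code;
// 'some_boxable' is a hypothetical struct tagged with objc_boxable):
//
//   NSNumber *n = @(6 * 7);          // built-in numeric type
//   NSString *s = @("hello");        // 'const char *'
//   NSValue  *v = @(some_boxable);   // struct with attribute objc_boxable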
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
const CXXRecordDecl *RD);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
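///
/// A minimal sketch of the attribute's GNU spelling (illustrative only):
/// \code
/// struct __attribute__((trivial_abi)) S {
///   S(S &&);
///   ~S();
/// };
/// \endcode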
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
const FunctionProtoType *T);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier; CheckBaseSpecifier performs
/// the semantic checks and builds the CXXBaseSpecifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
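///
/// For instance (a hedged example of a covariant return type):
/// \code
/// struct B { virtual B *clone(); };
/// struct D : B { D *clone() override; }; // covariant with B::clone
/// \endcode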
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec of the overriding function is a subset of the base function's.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
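///
/// A minimal example of the rejected pattern (illustrative only):
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); }; // error: D::f overrides final B::f
/// \endcode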
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true);
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation());
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
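///
/// For example (a hedged sketch; 'X' is a hypothetical class template):
/// \code
/// template<typename T> struct X { X(T); };
/// X(int) -> X<int>; // 'X' names a deduction guide here
/// \endcode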
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false,
bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword.
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
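///
/// A hedged illustration of code diagnosed in the UPPC_Expression
/// context (example only):
/// \code
/// template<typename ...Ts> void f(Ts ...ts) {
///   int arr[] = {ts}; // error: unexpanded parameter pack 'ts'
/// }
/// \endcode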
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// to determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
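///
/// A hedged illustration of one failure mode (TDK_Inconsistent):
/// \code
/// template<typename T> void f(T, T);
/// void g() { f(1, 2.5); } // T deduced as both 'int' and 'double'
/// \endcode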
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
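///
/// An illustrative sketch (example code, not from this header):
/// \code
/// auto x = 42;       // succeeds: deduced as 'int'
/// auto y = {1, 2.5}; // fails: inconsistent initializer_list element types
/// \endcode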
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting a template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
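///
/// A hedged usage sketch, assuming it runs inside a Sema member function
/// (so *this is the Sema instance) with a pack element index I in scope:
/// \code
/// ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
/// // ... substitutions here use the I-th element of each pack ...
/// \endcode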
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and isInvalid() evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
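///
/// A hedged usage sketch, assuming a Sema member function with
/// 'PointOfInstantiation' and 'Entity' in scope:
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return; // instantiation depth exceeded; already diagnosed
/// \endcode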
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
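// Illustrative usage sketch (assumed caller, not part of this header): the
// common pattern for entering an instantiation context. 'SemaRef', 'Loc',
// and 'Entity' are placeholders.
//
//   InstantiatingTemplate Inst(SemaRef, Loc, Entity);
//   if (Inst.isInvalid())
//     return true;   // instantiation depth limit exceeded; error emitted
//   if (Inst.isAlreadyInstantiating())
//     return false;  // this entity is already on the active stack
//   // ... perform the instantiation; the destructor pops the context ...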
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
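// Illustrative usage sketch: consuming the layered result. An empty
// Optional means "not in a SFINAE context"; a contained null pointer means
// a SFINAE context with no deduction-info recipient.
//
//   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
//     if (*Info)
//       ; // record the diagnostic on *Info instead of emitting it
//   } else {
//     ; // emit the diagnostic normally
//   }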
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
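// Illustrative usage sketch: trapping errors during a tentative check.
// While the trap is alive, SFINAE failures are counted rather than emitted;
// the destructor restores all of the saved state.
//
//   {
//     SFINAETrap Trap(SemaRef);
//     // ... perform the substitution or check being probed ...
//     if (Trap.hasErrorOccurred())
//       return /* report a substitution failure to the caller */ true;
//   }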
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
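// Illustrative usage sketch: probing whether a construct would be valid
// without committing to diagnostics or typo correction.
// 'analyzeConstruct' is a hypothetical helper.
//
//   bool IsValid;
//   {
//     TentativeAnalysisScope Tentative(SemaRef);
//     IsValid = analyzeConstruct();
//   }   // typo correction and diagnostics resume here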
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
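// Illustrative usage sketch of the intended protocol: the scope swaps out
// the pending work on entry, the caller runs perform() to drain it, and the
// destructor swaps the saved queues back (its asserts fire if work is still
// pending).
//
//   {
//     GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
//     // ... analysis that may enqueue instantiations and vtable uses ...
//     GlobalInstantiations.perform();
//   }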
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
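// Illustrative usage sketch: only parameters with a non-default
// ExtParameterInfo need to be set; getPointerOrNull() yields nullptr when
// nothing interesting was recorded, so the ExtProtoInfo can stay untouched.
// 'EPI' and 'NumParams' are placeholders.
//
//   ExtParameterInfoBuilder Builder;
//   Builder.set(2, FunctionProtoType::ExtParameterInfo().withIsNoEscape(true));
//   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);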
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Check the application of the Objective-C '__kindof' qualifier to
/// the given type.
bool checkObjCKindOfType(QualType &type, SourceLocation loc);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
/// Called on well-formed '\#pragma clang attribute push'.
void ActOnPragmaAttributePush(ParsedAttr &Attribute, SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
void AddNSConsumedAttr(SourceRange AttrRange, Decl *D,
unsigned SpellingListIndex, bool isNSConsumed,
bool isTemplateInstantiation);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If the current OpenCL extension
/// is empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If the current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
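// Illustrative usage sketch: associating types and declarations with the
// extensions that guard them. 'DoubleTy' and 'FD' are placeholders, and the
// extension names are only examples of the space-separated list format.
//
//   S.setOpenCLExtensionForType(DoubleTy, "cl_khr_fp64");
//   S.setOpenCLExtensionForDecl(FD, "cl_khr_fp16 cl_khr_subgroups");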
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Set to true inside '#pragma omp declare target' region.
bool IsInOpenMPDeclareTargetContext = false;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction, etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D) const;
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
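// Illustrative usage sketch (assumed parser-side caller): the DSA block
// calls bracket the handling of a single directive.
//
//   Actions.StartOpenMPDSABlock(DKind, DirName, CurScope, Loc);
//   // ... parse each clause between StartOpenMPClause(K) and
//   //     EndOpenMPClause() ...
//   StmtResult Directive = /* build the executable directive */ StmtError();
//   Actions.EndOpenMPDSABlock(Directive.get());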
/// Check if the current region is an OpenMP loop region and if it is,
/// mark the loop control variable used in \p Init for loop initialization as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Check if the specified type is allowed to be used in an 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP 'declare target' region.
bool isInOpenMPDeclareTargetContext() const {
return IsInOpenMPDeclareTargetContext;
}
/// Return true if inside an OpenMP target execution directive.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
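/// Called on well-formed clauses that consist of a single expression, such
/// as 'num_threads' or 'device'; dispatches on \p Kind to the corresponding
/// ActOnOpenMP*Clause method below.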
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
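/// Called on well-formed clauses that take a single enumeration argument,
/// such as 'default' or 'proc_bind'; dispatches on \p Kind.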
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
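/// Called on well-formed clauses that combine an expression with additional
/// enumeration arguments, such as 'schedule'; dispatches on \p Kind.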
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
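/// Called on well-formed clauses that take no arguments, such as 'nowait';
/// dispatches on \p Kind.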
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
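/// Called on well-formed clauses that take a list of variables, such as
/// 'private' or 'map'; dispatches on \p Kind.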
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind, OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation DepLinMapLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(OpenMPMapClauseKind MapTypeModifier,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *ActOnOpenMPToClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind given by \p VK.
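///
/// Example usage (an illustrative sketch; 'Operand' is a hypothetical
/// expression):
///
///   ExprResult Res = ImpCastExprToType(Operand, Context.IntTy,
///                                      CK_IntegralCast);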
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
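//
// An illustrative sketch (editorial; the surrounding names are hypothetical):
//
//   QualType ResultTy = UsualArithmeticConversions(LHS, RHS);
//   if (ResultTy.isNull() || !ResultTy->isArithmeticType())
//     return InvalidOperands(Loc, LHS, RHS);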
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
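///
/// An illustrative sketch (editorial; the names shown are hypothetical):
///
///   AssignConvertType ConvTy =
///       CheckSingleAssignmentConstraints(LHSType, RHS);
///   if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
///                                RHS.get()->getType(), RHS.get(),
///                                AA_Assigning))
///     return ExprError();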
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking for binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// Type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// Type checking declaration initializers (C99 6.7.8).
bool CheckForConstantInitializer(Expr *e, QualType t);
// Type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expression.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage,
SourceLocation lbrac, SourceLocation rbrac,
SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
ObjCMethodDecl *Method,
bool isClassMessage, bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
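/// Build a ConditionResult for the condition of an 'if', 'while', 'for',
/// 'do', or 'switch' statement. An illustrative sketch (editorial;
/// 'CondExpr' is a hypothetical expression):
///
///   ConditionResult Cond =
///       ActOnCondition(S, Loc, CondExpr, ConditionKind::Boolean);
///   if (Cond.isInvalid())
///     return StmtError();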
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted condition expression; invalid if there were any
/// errors.
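///
/// An illustrative sketch (editorial; 'IfLoc' and 'CondExpr' are
/// hypothetical):
///
///   ExprResult Converted = CheckBooleanCondition(IfLoc, CondExpr);
///   if (Converted.isInvalid())
///     return ExprError();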
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics; the result is invalid on failure.
/// Can optionally return the value of the expression via \p Result.
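///
/// An illustrative sketch (editorial; 'E' is a hypothetical expression):
///
///   llvm::APSInt Value;
///   if (VerifyIntegerConstantExpression(E, &Value).isInvalid())
///     return ExprError();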
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// The result is invalid on failure.
/// Can optionally report whether the bit-field is of width 0 via \p ZeroWidth.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
CUDADeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
CUDAKnownEmittedFns;
/// A partial call graph maintained during CUDA compilation to support
/// deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to CUDAKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
CUDACallGraph;
/// Diagnostic builder for CUDA errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class CUDADiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
CUDADiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
~CUDADiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (CUDADiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a CUDADiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const CUDADiagBuilder &operator<<(const CUDADiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiag.hasValue())
*Diag.PartialDiag << Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<PartialDiagnostic> PartialDiag;
};
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
CUDADiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a CUDADiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
CUDADiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In case of
/// error emits appropriate diagnostic and invalidates \p Var.
///
/// CUDA allows only empty constructors as initializers for global
/// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables whether they are local or not (they all are implicitly
/// static in CUDA). One exception is that CUDA allows constant initializers
/// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type, SourceLocation Loc,
ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD);
/// Check whether the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container that
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of an ArgumentKind identifier and a magic value; together they
/// uniquely identify the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Used to check whether the number of arguments being passed to a
/// function exceeds the number of parameters it expects.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view it as an extra argument.
return NumArgs > NumParams;
}
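// Worked example for the check above (illustrative): completing in
// "f(a, b, ^)" against "void f(int, int)" reaches here with NumArgs == 2 and
// PartialOverloading == true, so the test is 2 + 1 > 2 and the candidate is
// treated as having too many arguments.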
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDefaultedMemberExceptionSpecs.empty() &&
"there shouldn't be any pending delayed defaulted member "
"exception specs");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedExceptionSpecChecks) SavedExceptionSpecChecks;
decltype(DelayedDefaultedMemberExceptionSpecs)
SavedDefaultedMemberExceptionSpecs;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedExceptionSpecChecks.swap(S.DelayedExceptionSpecChecks);
SavedDefaultedMemberExceptionSpecs.swap(
S.DelayedDefaultedMemberExceptionSpecs);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is being converted to a pointer type T with lower
/// or equal alignment requirements; if so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
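// Typical usage (an illustrative sketch of how the parser drives this RAII
// type):
//   {
//     EnterExpressionEvaluationContext Unevaluated(
//         Actions, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... parse an unevaluated operand ...
//   } // PopExpressionEvaluationContext runs here automatically.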
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
Index: projects/clang700-import/contrib/llvm/tools/clang/lib/Basic/Version.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/lib/Basic/Version.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/lib/Basic/Version.cpp (revision 340125)
@@ -1,151 +1,151 @@
//===- Version.cpp - Clang Version Number -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several version-related utility functions for Clang.
//
//===----------------------------------------------------------------------===//
#include "clang/Basic/Version.h"
#include "clang/Basic/LLVM.h"
#include "clang/Config/config.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdlib>
#include <cstring>
#ifdef HAVE_SVN_VERSION_INC
# include "SVNVersion.inc"
#endif
namespace clang {
std::string getClangRepositoryPath() {
#if defined(CLANG_REPOSITORY_STRING)
return CLANG_REPOSITORY_STRING;
#else
#ifdef SVN_REPOSITORY
StringRef URL(SVN_REPOSITORY);
#else
StringRef URL("");
#endif
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_700/final/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/branches/release_70/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
}
// Strip off version from a build from an integration branch.
URL = URL.slice(0, URL.find("/src/tools/clang"));
// Trim path prefix off, assuming path came from standard cfe path.
size_t Start = URL.find("cfe/");
if (Start != StringRef::npos)
URL = URL.substr(Start + 4);
return URL;
#endif
}
std::string getLLVMRepositoryPath() {
#ifdef LLVM_REPOSITORY
StringRef URL(LLVM_REPOSITORY);
#else
StringRef URL("");
#endif
// Trim path prefix off, assuming path came from standard llvm path.
// Leave "llvm/" prefix to distinguish the following llvm revision from the
// clang revision.
size_t Start = URL.find("llvm/");
if (Start != StringRef::npos)
URL = URL.substr(Start);
return URL;
}
std::string getClangRevision() {
#ifdef SVN_REVISION
return SVN_REVISION;
#else
return "";
#endif
}
std::string getLLVMRevision() {
#ifdef LLVM_REVISION
return LLVM_REVISION;
#else
return "";
#endif
}
std::string getClangFullRepositoryVersion() {
std::string buf;
llvm::raw_string_ostream OS(buf);
std::string Path = getClangRepositoryPath();
std::string Revision = getClangRevision();
if (!Path.empty() || !Revision.empty()) {
OS << '(';
if (!Path.empty())
OS << Path;
if (!Revision.empty()) {
if (!Path.empty())
OS << ' ';
OS << Revision;
}
OS << ')';
}
// Support LLVM in a separate repository.
std::string LLVMRev = getLLVMRevision();
if (!LLVMRev.empty() && LLVMRev != Revision) {
OS << " (";
std::string LLVMRepo = getLLVMRepositoryPath();
if (!LLVMRepo.empty())
OS << LLVMRepo << ' ';
OS << LLVMRev << ')';
}
return OS.str();
}
std::string getClangFullVersion() {
return getClangToolFullVersion("clang");
}
std::string getClangToolFullVersion(StringRef ToolName) {
std::string buf;
llvm::raw_string_ostream OS(buf);
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
OS << ToolName << " version " CLANG_VERSION_STRING " "
<< getClangFullRepositoryVersion();
// If vendor supplied, include the base LLVM version as well.
#ifdef CLANG_VENDOR
OS << " (based on " << BACKEND_PACKAGE_STRING << ")";
#endif
return OS.str();
}
std::string getClangFullCPPVersion() {
// The version string we report in __VERSION__ is just a compacted version of
// the one we report on the command line.
std::string buf;
llvm::raw_string_ostream OS(buf);
#ifdef CLANG_VENDOR
OS << CLANG_VENDOR;
#endif
OS << "Clang " CLANG_VERSION_STRING " " << getClangFullRepositoryVersion();
return OS.str();
}
} // end namespace clang
Index: projects/clang700-import/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp (revision 340125)
@@ -1,12309 +1,12309 @@
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Analysis/Analyses/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
#include <sstream>
using namespace clang;
using namespace CodeGen;
using namespace llvm;
static
int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
return std::min(High, std::max(Low, Value));
}
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
unsigned BuiltinID) {
assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
// Get the name, skip over the __builtin_ prefix (if necessary).
StringRef Name;
GlobalDecl D(FD);
// If the builtin has been declared explicitly with an assembler label,
// use the mangled name. This differs from the plain label on platforms
// that prefix labels.
if (FD->hasAttr<AsmLabelAttr>())
Name = getMangledName(D);
else
Name = Context.BuiltinInfo.getName(BuiltinID) + 10;
llvm::FunctionType *Ty =
cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::IntegerType *IntType) {
V = CGF.EmitToMemory(V, T);
if (V->getType()->isPointerTy())
return CGF.Builder.CreatePtrToInt(V, IntType);
assert(V->getType() == IntType);
return V;
}
static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
QualType T, llvm::Type *ResultType) {
V = CGF.EmitFromMemory(V, T);
if (ResultType->isPointerTy())
return CGF.Builder.CreateIntToPtr(V, ResultType);
assert(V->getType() == ResultType);
return V;
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
llvm::Value *Args[2];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
return EmitFromInt(CGF, Result, T, ValueType);
}
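// Sketch of the IR produced for a 32-bit atomic add (illustrative; value
// names vary):
//   %old = atomicrmw add i32* %dst, i32 %val seq_cst
// Pointer-typed operands take the same path because EmitToInt/EmitFromInt
// round-trip them through an integer of the same width.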
static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
Value *Val = CGF.EmitScalarExpr(E->getArg(0));
Value *Address = CGF.EmitScalarExpr(E->getArg(1));
// Convert the type of the pointer to a pointer to the stored type.
Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
Value *BC = CGF.Builder.CreateBitCast(
Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
LV.setNontemporal(true);
CGF.EmitStoreOfScalar(Val, LV, false);
return nullptr;
}
static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
Value *Address = CGF.EmitScalarExpr(E->getArg(0));
LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
LV.setNontemporal(true);
return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) {
return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}
/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E,
Instruction::BinaryOps Op,
bool Invert = false) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
E->getArg(0)->getType()->getPointeeType()));
assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType =
llvm::IntegerType::get(CGF.getLLVMContext(),
CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
llvm::Value *Args[2];
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
if (Invert)
Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
llvm::ConstantInt::get(IntType, -1));
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E Builtin call expression to convert to cmpxchg.
/// arg0 - address to operate on
/// arg1 - value to compare with
/// arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
/// cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
bool ReturnBool) {
QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
Value *Args[3];
Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
Args[1] = CGF.EmitScalarExpr(E->getArg(1));
llvm::Type *ValueType = Args[1]->getType();
Args[1] = EmitToInt(CGF, Args[1], T, IntType);
Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
llvm::AtomicOrdering::SequentiallyConsistent);
if (ReturnBool)
// Extract boolean success flag and zext it to int.
return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
CGF.ConvertType(E->getType()));
else
// Extract old value and emit it using the same type as compare value.
return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
ValueType);
}
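// Usage sketch (illustrative): callers lower value-returning compare-and-swap
// builtins with ReturnBool == false, yielding the old value in the compare
// value's type, and flag-returning variants with ReturnBool == true, yielding
// the cmpxchg success flag zero-extended to the call's result type.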
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, Src0);
}
// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1 });
}
// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}
// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
unsigned IntrinsicID) {
llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
return CGF.Builder.CreateCall(F, {Src0, Src1});
}
/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
Call->setDoesNotAccessMemory();
return Call;
}
/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
LLVMContext &C = CGF.CGM.getLLVMContext();
llvm::Type *Ty = V->getType();
int Width = Ty->getPrimitiveSizeInBits();
llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateBitCast(V, IntTy);
if (Ty->isPPC_FP128Ty()) {
// We want the sign bit of the higher-order double. The bitcast we just
// did works as if the double-double was stored to memory and then
// read as an i128. The "store" will put the higher-order double in the
// lower address in both little- and big-Endian modes, but the "load"
// will treat those bits as a different part of the i128: the low bits in
// little-Endian, the high bits in big-Endian. Therefore, on big-Endian
// we need to shift the high bits down to the low before truncating.
Width >>= 1;
if (CGF.getTarget().isBigEndian()) {
Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
V = CGF.Builder.CreateLShr(V, ShiftCst);
}
// We are truncating value in order to extract the higher-order
// double, which we will be using to extract the sign from.
IntTy = llvm::IntegerType::get(C, Width);
V = CGF.Builder.CreateTrunc(V, IntTy);
}
Value *Zero = llvm::Constant::getNullValue(IntTy);
return CGF.Builder.CreateICmpSLT(V, Zero);
}
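// Example of the common (non-PPC_FP128) path above (illustrative): a double
// is bitcast to i64 and the test becomes "icmp slt i64 %bits, 0", so -0.0
// (bit pattern 0x8000000000000000) correctly reports a set sign bit.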
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
const CallExpr *E, llvm::Constant *calleeValue) {
CGCallee callee = CGCallee::forDirect(calleeValue, FD);
return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
const llvm::Intrinsic::ID IntrinsicID,
llvm::Value *X, llvm::Value *Y,
llvm::Value *&Carry) {
// Make sure we have integers of the same width.
assert(X->getType() == Y->getType() &&
"Arguments must be the same type. (Did you forget to make sure both "
"arguments have the same integer width?)");
llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
return CGF.Builder.CreateExtractValue(Tmp, 0);
}
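// For instance (illustrative), IntrinsicID == llvm::Intrinsic::sadd_with_overflow
// on i32 operands emits:
//   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
// with element 0 returned as the wrapped result and element 1 stored into
// Carry.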
static Value *emitRangedBuiltin(CodeGenFunction &CGF,
unsigned IntrinsicID,
int low, int high) {
llvm::MDBuilder MDHelper(CGF.getLLVMContext());
llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
llvm::Instruction *Call = CGF.Builder.CreateCall(F);
Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
return Call;
}
namespace {
struct WidthAndSignedness {
unsigned Width;
bool Signed;
};
}
static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
const clang::QualType Type) {
assert(Type->isIntegerType() && "Given type is not an integer.");
unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
bool Signed = Type->isSignedIntegerType();
return {Width, Signed};
}
// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
assert(Types.size() > 0 && "Empty list of types.");
// If any of the given types is signed, we must return a signed type.
bool Signed = false;
for (const auto &Type : Types) {
Signed |= Type.Signed;
}
// The encompassing type must have a width greater than or equal to the width
// of the specified types. Additionally, if the encompassing type is signed,
// its width must be strictly greater than the width of any unsigned types
// given.
unsigned Width = 0;
for (const auto &Type : Types) {
unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
if (Width < MinWidth) {
Width = MinWidth;
}
}
return {Width, Signed};
}
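// Two worked cases (illustrative): {unsigned 32-bit, signed 16-bit} must be
// signed and strictly wider than the unsigned 32-bit input, giving a signed
// 33-bit result; {unsigned 8-bit, unsigned 16-bit} simply gives an unsigned
// 16-bit result.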
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
llvm::Type *DestType = Int8PtrTy;
if (ArgValue->getType() != DestType)
ArgValue =
Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());
Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
// Note: Our __builtin_object_size implementation currently treats Type=0 and
// Type=2 identically. Encoding this implementation detail here may make
// improving __builtin_object_size difficult in the future, so it's omitted.
return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}
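// Concretely (illustrative): a result computed for Type == 0 may stand in
// for a Type == 1 query, and Type == 3 for Type == 2, but not the reverse;
// any other substitution requires From == To.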
static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
llvm::Value *EmittedE) {
uint64_t ObjectSize;
if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
return emitBuiltinObjectSize(E, Type, ResType, EmittedE);
return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
/// it)
/// - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType,
llvm::Value *EmittedE) {
// We need to reference an argument if the pointer is a parameter with the
// pass_object_size attribute.
if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
if (Param != nullptr && PS != nullptr &&
areBOSTypesCompatible(PS->getType(), Type)) {
auto Iter = SizeArguments.find(Param);
assert(Iter != SizeArguments.end());
const ImplicitParamDecl *D = Iter->second;
auto DIter = LocalDeclMap.find(D);
assert(DIter != LocalDeclMap.end());
return EmitLoadOfScalar(DIter->second, /*volatile=*/false,
getContext().getSizeType(), E->getLocStart());
}
}
// LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
// evaluate E for side-effects. In either case, we shouldn't lower to
// @llvm.objectsize.
if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
return getDefaultBuiltinObjectSizeResult(Type, ResType);
Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
assert(Ptr->getType()->isPointerTy() &&
"Non-pointer passed to __builtin_object_size?");
Value *F = CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
// LLVM only supports 0 and 2; make sure we pass that along as a boolean.
Value *Min = Builder.getInt1((Type & 2) != 0);
// For GCC compatibility, __builtin_object_size treats NULL as unknown size.
Value *NullIsUnknown = Builder.getTrue();
return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown});
}
namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
enum InterlockingKind : uint8_t {
Unlocked,
Sequential,
Acquire,
Release,
NoFence
};
ActionKind Action;
InterlockingKind Interlocking;
bool Is64Bit;
static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace
BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
switch (BuiltinID) {
// Main portable variants.
case Builtin::BI_bittest:
return {TestOnly, Unlocked, false};
case Builtin::BI_bittestandcomplement:
return {Complement, Unlocked, false};
case Builtin::BI_bittestandreset:
return {Reset, Unlocked, false};
case Builtin::BI_bittestandset:
return {Set, Unlocked, false};
case Builtin::BI_interlockedbittestandreset:
return {Reset, Sequential, false};
case Builtin::BI_interlockedbittestandset:
return {Set, Sequential, false};
// X86-specific 64-bit variants.
case Builtin::BI_bittest64:
return {TestOnly, Unlocked, true};
case Builtin::BI_bittestandcomplement64:
return {Complement, Unlocked, true};
case Builtin::BI_bittestandreset64:
return {Reset, Unlocked, true};
case Builtin::BI_bittestandset64:
return {Set, Unlocked, true};
case Builtin::BI_interlockedbittestandreset64:
return {Reset, Sequential, true};
case Builtin::BI_interlockedbittestandset64:
return {Set, Sequential, true};
// ARM/AArch64-specific ordering variants.
case Builtin::BI_interlockedbittestandset_acq:
return {Set, Acquire, false};
case Builtin::BI_interlockedbittestandset_rel:
return {Set, Release, false};
case Builtin::BI_interlockedbittestandset_nf:
return {Set, NoFence, false};
case Builtin::BI_interlockedbittestandreset_acq:
return {Reset, Acquire, false};
case Builtin::BI_interlockedbittestandreset_rel:
return {Reset, Release, false};
case Builtin::BI_interlockedbittestandreset_nf:
return {Reset, NoFence, false};
}
llvm_unreachable("expected only bittest intrinsics");
}
static char bitActionToX86BTCode(BitTest::ActionKind A) {
switch (A) {
case BitTest::TestOnly: return '\0';
case BitTest::Complement: return 'c';
case BitTest::Reset: return 'r';
case BitTest::Set: return 's';
}
llvm_unreachable("invalid action");
}
static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
BitTest BT,
const CallExpr *E, Value *BitBase,
Value *BitPos) {
char Action = bitActionToX86BTCode(BT.Action);
char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
// Build the assembly.
SmallString<64> Asm;
raw_svector_ostream AsmOS(Asm);
if (BT.Interlocking != BitTest::Unlocked)
AsmOS << "lock ";
AsmOS << "bt";
if (Action)
AsmOS << Action;
AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";
// Build the constraints. FIXME: We should support immediates when possible.
std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
llvm::IntegerType *IntType = llvm::IntegerType::get(
CGF.getLLVMContext(),
CGF.getContext().getTypeSize(E->getArg(1)->getType()));
llvm::Type *IntPtrType = IntType->getPointerTo();
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
switch (I) {
case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
case BitTest::Release: return llvm::AtomicOrdering::Release;
case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
}
llvm_unreachable("invalid interlocking");
}
/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
unsigned BuiltinID,
const CallExpr *E) {
Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
// X86 has special BT, BTC, BTR, and BTS instructions that handle the array
// indexing operation internally. Use them if possible.
llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch();
if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
// Otherwise, use generic code to load one byte and test the bit. Use all but
// the bottom three bits as the array index, and the bottom three bits to form
// a mask.
// Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
Value *ByteIndex = CGF.Builder.CreateAShr(
BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
ByteIndex, "bittest.byteaddr"),
CharUnits::One());
Value *PosLow =
CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
// The updating instructions will need a mask.
Value *Mask = nullptr;
if (BT.Action != BitTest::TestOnly) {
Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
"bittest.mask");
}
// Check the action and ordering of the interlocked intrinsics.
llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
Value *OldByte = nullptr;
if (Ordering != llvm::AtomicOrdering::NotAtomic) {
// Emit a combined atomicrmw load/store operation for the interlocked
// intrinsics.
llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
if (BT.Action == BitTest::Reset) {
Mask = CGF.Builder.CreateNot(Mask);
RMWOp = llvm::AtomicRMWInst::And;
}
OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
Ordering);
} else {
// Emit a plain load for the non-interlocked intrinsics.
OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
Value *NewByte = nullptr;
switch (BT.Action) {
case BitTest::TestOnly:
// Don't store anything.
break;
case BitTest::Complement:
NewByte = CGF.Builder.CreateXor(OldByte, Mask);
break;
case BitTest::Reset:
NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
break;
case BitTest::Set:
NewByte = CGF.Builder.CreateOr(OldByte, Mask);
break;
}
if (NewByte)
CGF.Builder.CreateStore(NewByte, ByteAddr);
}
// However we loaded the old byte, either by plain load or atomicrmw, shift
// the bit into the low position and mask it to 0 or 1.
Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
return CGF.Builder.CreateAnd(
ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
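// Worked example of the generic path (illustrative): _bittest(base, 70) on a
// non-x86 target loads base[70 >> 3], i.e. byte 8, and tests bit
// 70 & 0x7 == 6 of it, per the "Bit = ..." formula above.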
namespace {
enum class MSVCSetJmpKind {
_setjmpex,
_setjmp3,
_setjmp
};
}
/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
const CallExpr *E) {
llvm::Value *Arg1 = nullptr;
llvm::Type *Arg1Ty = nullptr;
StringRef Name;
bool IsVarArg = false;
if (SJKind == MSVCSetJmpKind::_setjmp3) {
Name = "_setjmp3";
Arg1Ty = CGF.Int32Ty;
Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
IsVarArg = true;
} else {
Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
Arg1Ty = CGF.Int8PtrTy;
Arg1 = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(Intrinsic::frameaddress),
llvm::ConstantInt::get(CGF.Int32Ty, 0));
}
// Mark the call site and declaration with ReturnsTwice.
llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::ReturnsTwice);
llvm::Constant *SetJmpFn = CGF.CGM.CreateRuntimeFunction(
llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
llvm::Value *Args[] = {Buf, Arg1};
llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
CS.setAttributes(ReturnsTwiceAttr);
return RValue::get(CS.getInstruction());
}
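// Sketch of the resulting call (illustrative): for _setjmp3 (used on 32-bit
// x86) this emits the variadic call _setjmp3(buf, 0); for the other kinds it
// emits _setjmp/_setjmpex(buf, @llvm.frameaddress(0)), with ReturnsTwice
// attached to both the call site and the declaration.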
// Many of the MSVC builtins are available on x64, ARM, and AArch64; to avoid
// repeating code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
_BitScanForward,
_BitScanReverse,
_InterlockedAnd,
_InterlockedDecrement,
_InterlockedExchange,
_InterlockedExchangeAdd,
_InterlockedExchangeSub,
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
__fastfail,
};
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case MSVCIntrin::_BitScanForward:
case MSVCIntrin::_BitScanReverse: {
Value *ArgValue = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = ArgValue->getType();
llvm::Type *IndexType =
EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
llvm::Type *ResultType = ConvertType(E->getType());
Value *ArgZero = llvm::Constant::getNullValue(ArgType);
Value *ResZero = llvm::Constant::getNullValue(ResultType);
Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
BasicBlock *Begin = Builder.GetInsertBlock();
BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
Builder.SetInsertPoint(End);
PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
Builder.SetInsertPoint(Begin);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
Builder.CreateCondBr(IsZero, End, NotZero);
Result->addIncoming(ResZero, Begin);
Builder.SetInsertPoint(NotZero);
Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
if (BuiltinID == MSVCIntrin::_BitScanForward) {
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
Builder.CreateStore(ZeroCount, IndexAddress, false);
} else {
unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
Builder.CreateStore(Index, IndexAddress, false);
}
Builder.CreateBr(End);
Result->addIncoming(ResOne, NotZero);
Builder.SetInsertPoint(End);
return Result;
}
case MSVCIntrin::_InterlockedAnd:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
case MSVCIntrin::_InterlockedExchange:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
case MSVCIntrin::_InterlockedExchangeAdd:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
case MSVCIntrin::_InterlockedExchangeSub:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
case MSVCIntrin::_InterlockedOr:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
case MSVCIntrin::_InterlockedXor:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
case MSVCIntrin::_InterlockedDecrement: {
llvm::Type *IntTy = ConvertType(E->getType());
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
AtomicRMWInst::Sub,
EmitScalarExpr(E->getArg(0)),
ConstantInt::get(IntTy, 1),
llvm::AtomicOrdering::SequentiallyConsistent);
return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
}
case MSVCIntrin::_InterlockedIncrement: {
llvm::Type *IntTy = ConvertType(E->getType());
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
AtomicRMWInst::Add,
EmitScalarExpr(E->getArg(0)),
ConstantInt::get(IntTy, 1),
llvm::AtomicOrdering::SequentiallyConsistent);
return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
}
case MSVCIntrin::__fastfail: {
// Request immediate process termination from the kernel. The instruction
// sequences to do this are documented on MSDN:
// https://msdn.microsoft.com/en-us/library/dn774154.aspx
llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
StringRef Asm, Constraints;
switch (ISA) {
default:
ErrorUnsupported(E, "__fastfail call for this architecture");
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
Asm = "int $$0x29";
Constraints = "{cx}";
break;
case llvm::Triple::thumb:
Asm = "udf #251";
Constraints = "{r0}";
break;
}
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoReturn);
CallSite CS = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
CS.setAttributes(NoReturnAttr);
return CS.getInstruction();
}
}
llvm_unreachable("Incorrect MSVC intrinsic!");
}
namespace {
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {
CallObjCArcUse(llvm::Value *object) : object(object) {}
llvm::Value *object;
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.EmitARCIntrinsicUse(object);
}
};
}
Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
BuiltinCheckKind Kind) {
assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
&& "Unsupported builtin check kind");
Value *ArgValue = EmitScalarExpr(E);
if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
return ArgValue;
SanitizerScope SanScope(this);
Value *Cond = Builder.CreateICmpNE(
ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
SanitizerHandler::InvalidBuiltin,
{EmitCheckSourceLocation(E->getExprLoc()),
llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
None);
return ArgValue;
}
/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
return C.getCanonicalType(UnsignedTy);
}
llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
const analyze_os_log::OSLogBufferLayout &Layout,
CharUnits BufferAlignment) {
ASTContext &Ctx = getContext();
llvm::SmallString<64> Name;
{
raw_svector_ostream OS(Name);
OS << "__os_log_helper";
OS << "_" << BufferAlignment.getQuantity();
OS << "_" << int(Layout.getSummaryByte());
OS << "_" << int(Layout.getNumArgsByte());
for (const auto &Item : Layout.Items)
OS << "_" << int(Item.getSizeByte()) << "_"
<< int(Item.getDescriptorByte());
}
if (llvm::Function *F = CGM.getModule().getFunction(Name))
return F;
llvm::SmallVector<ImplicitParamDecl, 4> Params;
Params.emplace_back(Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"),
Ctx.VoidPtrTy, ImplicitParamDecl::Other);
for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
char Size = Layout.Items[I].getSizeByte();
if (!Size)
continue;
Params.emplace_back(
Ctx, nullptr, SourceLocation(),
&Ctx.Idents.get(std::string("arg") + llvm::to_string(I)),
getOSLogArgType(Ctx, Size), ImplicitParamDecl::Other);
}
FunctionArgList Args;
for (auto &P : Params)
Args.push_back(&P);
// The helper function has linkonce_odr linkage to enable the linker to merge
// identical functions. To ensure the merging always happens, 'noinline' is
// attached to the function when compiling with -Oz.
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = llvm::Function::Create(
FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.SetLLVMFunctionAttributes(nullptr, FI, Fn);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
// Attach 'noinline' at -Oz.
if (CGM.getCodeGenOpts().OptimizeSize == 2)
Fn->addFnAttr(llvm::Attribute::NoInline);
auto NL = ApplyDebugLocation::CreateEmpty(*this);
IdentifierInfo *II = &Ctx.Idents.get(Name);
FunctionDecl *FD = FunctionDecl::Create(
Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
Ctx.VoidTy, nullptr, SC_PrivateExtern, false, false);
StartFunction(FD, Ctx.VoidTy, Fn, FI, Args);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
CharUnits Offset;
Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(&Params[0]), "buf"),
BufferAlignment);
Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
unsigned I = 1;
for (const auto &Item : Layout.Items) {
Builder.CreateStore(
Builder.getInt8(Item.getDescriptorByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
Builder.CreateStore(
Builder.getInt8(Item.getSizeByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
CharUnits Size = Item.size();
if (!Size.getQuantity())
continue;
Address Arg = GetAddrOfLocalVar(&Params[I]);
Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
"argDataCast");
Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
Offset += Size;
++I;
}
FinishFunction();
return Fn;
}
RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
assert(E.getNumArgs() >= 2 &&
"__builtin_os_log_format takes at least 2 arguments");
ASTContext &Ctx = getContext();
analyze_os_log::OSLogBufferLayout Layout;
analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
// Ignore argument 1, the format string. It is not currently used.
CallArgList Args;
Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);
for (const auto &Item : Layout.Items) {
int Size = Item.getSizeByte();
if (!Size)
continue;
llvm::Value *ArgVal;
if (const Expr *TheExpr = Item.getExpr()) {
ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
// Check if this is a retainable type.
if (TheExpr->getType()->isObjCRetainableType()) {
assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
"Only scalar can be a ObjC retainable type");
// Check if the object is constant, if not, save it in
// RetainableOperands.
if (!isa<Constant>(ArgVal))
RetainableOperands.push_back(ArgVal);
}
} else {
ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
}
unsigned ArgValSize =
CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
ArgValSize);
ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
CanQualType ArgTy = getOSLogArgType(Ctx, Size);
// If ArgVal has type x86_fp80, zero-extend ArgVal.
ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
Args.add(RValue::get(ArgVal), ArgTy);
}
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
Layout, BufAddr.getAlignment());
EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
// Push a clang.arc.use cleanup for each object in RetainableOperands. The
// cleanup will cause the use to appear after the final log call, keeping
// the object valid while it's held in the log buffer. Note that if there's
// a release cleanup on the object, it will already be active; since
// cleanups are emitted in reverse order, the use will occur before the
// object is released.
if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
CGM.getCodeGenOpts().OptimizationLevel != 0)
for (llvm::Value *Object : RetainableOperands)
pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object);
return RValue::get(BufAddr.getPointer());
}
/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
WidthAndSignedness Op1Info,
WidthAndSignedness Op2Info,
WidthAndSignedness ResultInfo) {
return BuiltinID == Builtin::BI__builtin_mul_overflow &&
Op1Info.Width == Op2Info.Width && Op1Info.Width >= ResultInfo.Width &&
Op1Info.Signed != Op2Info.Signed;
}
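// For example, __builtin_mul_overflow with an 'int' and an 'unsigned'
// operand and an 'int *' result has matching 32-bit operand widths and
// mixed signedness, so it qualifies for the specialized path below.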
/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
WidthAndSignedness Op1Info, const clang::Expr *Op2,
WidthAndSignedness Op2Info,
const clang::Expr *ResultArg, QualType ResultQTy,
WidthAndSignedness ResultInfo) {
assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
Op2Info, ResultInfo) &&
"Not a mixed-sign multipliction we can specialize");
// Emit the signed and unsigned operands.
const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
llvm::Type *OpTy = Signed->getType();
llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
llvm::Type *ResTy = ResultPtr.getElementType();
// Take the absolute value of the signed operand.
llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
llvm::Value *AbsSigned =
CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
// Perform a checked unsigned multiplication.
llvm::Value *UnsignedOverflow;
llvm::Value *UnsignedResult =
EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
Unsigned, UnsignedOverflow);
llvm::Value *Overflow, *Result;
if (ResultInfo.Signed) {
// Signed overflow occurs if the result is greater than INT_MAX or less
// than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
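// E.g. for a 32-bit signed result, INT_MAX is 2^31 - 1, but when the
// signed operand is negative the product may legally reach -2^31, so the
// allowed magnitude is INT_MAX + 1.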
auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width)
.zextOrSelf(Op1Info.Width);
llvm::Value *MaxResult =
CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
CGF.Builder.CreateZExt(IsNegative, OpTy));
llvm::Value *SignedOverflow =
CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
// Prepare the signed result (possibly by negating it).
llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
llvm::Value *SignedResult =
CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
} else {
// Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
llvm::Value *Underflow = CGF.Builder.CreateAnd(
IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
if (ResultInfo.Width < Op1Info.Width) {
auto IntMax =
llvm::APInt::getMaxValue(ResultInfo.Width).zext(Op1Info.Width);
llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
}
// Negate the product if it would be negative in infinite precision.
Result = CGF.Builder.CreateSelect(
IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
Result = CGF.Builder.CreateTrunc(Result, ResTy);
}
assert(Overflow && Result && "Missing overflow or result");
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
isVolatile);
return RValue::get(Overflow);
}
static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
Value *&RecordPtr, CharUnits Align, Value *Func,
int Lvl) {
const auto *RT = RType->getAs<RecordType>();
ASTContext &Context = CGF.getContext();
RecordDecl *RD = RT->getDecl()->getDefinition();
ASTContext &Ctx = RD->getASTContext();
const ASTRecordLayout &RL = Ctx.getASTRecordLayout(RD);
std::string Pad = std::string(Lvl * 4, ' ');
Value *GString =
CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
Value *Res = CGF.Builder.CreateCall(Func, {GString});
static llvm::DenseMap<QualType, const char *> Types;
if (Types.empty()) {
Types[Context.CharTy] = "%c";
Types[Context.BoolTy] = "%d";
Types[Context.SignedCharTy] = "%hhd";
Types[Context.UnsignedCharTy] = "%hhu";
Types[Context.IntTy] = "%d";
Types[Context.UnsignedIntTy] = "%u";
Types[Context.LongTy] = "%ld";
Types[Context.UnsignedLongTy] = "%lu";
Types[Context.LongLongTy] = "%lld";
Types[Context.UnsignedLongLongTy] = "%llu";
Types[Context.ShortTy] = "%hd";
Types[Context.UnsignedShortTy] = "%hu";
Types[Context.VoidPtrTy] = "%p";
Types[Context.FloatTy] = "%f";
Types[Context.DoubleTy] = "%f";
Types[Context.LongDoubleTy] = "%Lf";
Types[Context.getPointerType(Context.CharTy)] = "%s";
Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
}
for (const auto *FD : RD->fields()) {
uint64_t Off = RL.getFieldOffset(FD->getFieldIndex());
Off = Ctx.toCharUnitsFromBits(Off).getQuantity();
Value *FieldPtr = RecordPtr;
if (RD->isUnion())
FieldPtr = CGF.Builder.CreatePointerCast(
FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
else
FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
FD->getFieldIndex());
GString = CGF.Builder.CreateGlobalStringPtr(
llvm::Twine(Pad)
.concat(FD->getType().getAsString())
.concat(llvm::Twine(' '))
.concat(FD->getNameAsString())
.concat(" : ")
.str());
Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
Res = CGF.Builder.CreateAdd(Res, TmpRes);
QualType CanonicalType =
FD->getType().getUnqualifiedType().getCanonicalType();
// If the field is itself a record type, recurse into it.
if (CanonicalType->isRecordType()) {
Value *TmpRes =
dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
Res = CGF.Builder.CreateAdd(TmpRes, Res);
continue;
}
// Pick the best printf format for the current field, falling back to %p.
llvm::Twine Format = Types.find(CanonicalType) == Types.end()
? Types[Context.VoidPtrTy]
: Types[CanonicalType];
Address FieldAddress = Address(FieldPtr, Align);
FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
// FIXME: Need to handle bitfields here.
GString = CGF.Builder.CreateGlobalStringPtr(
Format.concat(llvm::Twine('\n')).str());
TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
Res = CGF.Builder.CreateAdd(Res, TmpRes);
}
GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
Res = CGF.Builder.CreateAdd(Res, TmpRes);
return Res;
}
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue) {
// See if we can constant fold this builtin. If so, don't emit it at all.
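// E.g. __builtin_popcount(7) is folded to the constant 3 here and never
// reaches the switches below.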
Expr::EvalResult Result;
if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
Result.Val.getInt()));
if (Result.Val.isFloat())
return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
Result.Val.getFloat()));
}
// There are LLVM math intrinsics/instructions corresponding to math library
// functions except the LLVM op will never set errno while the math library
// might. Also, math builtins have the same semantics as their math library
// twins. Thus, we can transform math library and builtin calls to their
// LLVM counterparts if the call is marked 'const' (known to never set errno).
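// E.g. a 'const' call to cosf(x) is emitted as the llvm.cos.f32 intrinsic
// by the switch below.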
if (FD->hasAttr<ConstAttr>()) {
switch (BuiltinID) {
case Builtin::BIceil:
case Builtin::BIceilf:
case Builtin::BIceill:
case Builtin::BI__builtin_ceil:
case Builtin::BI__builtin_ceilf:
case Builtin::BI__builtin_ceill:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil));
case Builtin::BIcopysign:
case Builtin::BIcopysignf:
case Builtin::BIcopysignl:
case Builtin::BI__builtin_copysign:
case Builtin::BI__builtin_copysignf:
case Builtin::BI__builtin_copysignl:
case Builtin::BI__builtin_copysignf128:
return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));
case Builtin::BIcos:
case Builtin::BIcosf:
case Builtin::BIcosl:
case Builtin::BI__builtin_cos:
case Builtin::BI__builtin_cosf:
case Builtin::BI__builtin_cosl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos));
case Builtin::BIexp:
case Builtin::BIexpf:
case Builtin::BIexpl:
case Builtin::BI__builtin_exp:
case Builtin::BI__builtin_expf:
case Builtin::BI__builtin_expl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp));
case Builtin::BIexp2:
case Builtin::BIexp2f:
case Builtin::BIexp2l:
case Builtin::BI__builtin_exp2:
case Builtin::BI__builtin_exp2f:
case Builtin::BI__builtin_exp2l:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2));
case Builtin::BIfabs:
case Builtin::BIfabsf:
case Builtin::BIfabsl:
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
case Builtin::BI__builtin_fabsl:
case Builtin::BI__builtin_fabsf128:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));
case Builtin::BIfloor:
case Builtin::BIfloorf:
case Builtin::BIfloorl:
case Builtin::BI__builtin_floor:
case Builtin::BI__builtin_floorf:
case Builtin::BI__builtin_floorl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor));
case Builtin::BIfma:
case Builtin::BIfmaf:
case Builtin::BIfmal:
case Builtin::BI__builtin_fma:
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmal:
return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma));
case Builtin::BIfmax:
case Builtin::BIfmaxf:
case Builtin::BIfmaxl:
case Builtin::BI__builtin_fmax:
case Builtin::BI__builtin_fmaxf:
case Builtin::BI__builtin_fmaxl:
return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum));
case Builtin::BIfmin:
case Builtin::BIfminf:
case Builtin::BIfminl:
case Builtin::BI__builtin_fmin:
case Builtin::BI__builtin_fminf:
case Builtin::BI__builtin_fminl:
return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum));
// fmod() is a special case. It maps to the frem instruction rather than an
// LLVM intrinsic.
case Builtin::BIfmod:
case Builtin::BIfmodf:
case Builtin::BIfmodl:
case Builtin::BI__builtin_fmod:
case Builtin::BI__builtin_fmodf:
case Builtin::BI__builtin_fmodl: {
Value *Arg1 = EmitScalarExpr(E->getArg(0));
Value *Arg2 = EmitScalarExpr(E->getArg(1));
return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
}
case Builtin::BIlog:
case Builtin::BIlogf:
case Builtin::BIlogl:
case Builtin::BI__builtin_log:
case Builtin::BI__builtin_logf:
case Builtin::BI__builtin_logl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log));
case Builtin::BIlog10:
case Builtin::BIlog10f:
case Builtin::BIlog10l:
case Builtin::BI__builtin_log10:
case Builtin::BI__builtin_log10f:
case Builtin::BI__builtin_log10l:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10));
case Builtin::BIlog2:
case Builtin::BIlog2f:
case Builtin::BIlog2l:
case Builtin::BI__builtin_log2:
case Builtin::BI__builtin_log2f:
case Builtin::BI__builtin_log2l:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2));
case Builtin::BInearbyint:
case Builtin::BInearbyintf:
case Builtin::BInearbyintl:
case Builtin::BI__builtin_nearbyint:
case Builtin::BI__builtin_nearbyintf:
case Builtin::BI__builtin_nearbyintl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint));
case Builtin::BIpow:
case Builtin::BIpowf:
case Builtin::BIpowl:
case Builtin::BI__builtin_pow:
case Builtin::BI__builtin_powf:
case Builtin::BI__builtin_powl:
return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow));
case Builtin::BIrint:
case Builtin::BIrintf:
case Builtin::BIrintl:
case Builtin::BI__builtin_rint:
case Builtin::BI__builtin_rintf:
case Builtin::BI__builtin_rintl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint));
case Builtin::BIround:
case Builtin::BIroundf:
case Builtin::BIroundl:
case Builtin::BI__builtin_round:
case Builtin::BI__builtin_roundf:
case Builtin::BI__builtin_roundl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round));
case Builtin::BIsin:
case Builtin::BIsinf:
case Builtin::BIsinl:
case Builtin::BI__builtin_sin:
case Builtin::BI__builtin_sinf:
case Builtin::BI__builtin_sinl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin));
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
case Builtin::BIsqrtl:
case Builtin::BI__builtin_sqrt:
case Builtin::BI__builtin_sqrtf:
case Builtin::BI__builtin_sqrtl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt));
case Builtin::BItrunc:
case Builtin::BItruncf:
case Builtin::BItruncl:
case Builtin::BI__builtin_trunc:
case Builtin::BI__builtin_truncf:
case Builtin::BI__builtin_truncl:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc));
default:
break;
}
}
switch (BuiltinID) {
default: break;
case Builtin::BI__builtin___CFStringMakeConstantString:
case Builtin::BI__builtin___NSStringMakeConstantString:
return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
case Builtin::BI__va_start:
case Builtin::BI__builtin_va_end:
return RValue::get(
EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
? EmitScalarExpr(E->getArg(0))
: EmitVAListRef(E->getArg(0)).getPointer(),
BuiltinID != Builtin::BI__builtin_va_end));
case Builtin::BI__builtin_va_copy: {
Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
llvm::Type *Type = Int8PtrTy;
DstPtr = Builder.CreateBitCast(DstPtr, Type);
SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
{DstPtr, SrcPtr}));
}
case Builtin::BI__builtin_abs:
case Builtin::BI__builtin_labs:
case Builtin::BI__builtin_llabs: {
// X < 0 ? -X : X
// The negation has 'nsw' because abs of INT_MIN is undefined.
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
return RValue::get(Result);
}
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
case Builtin::BI__builtin_conjl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
Value *Real = ComplexVal.first;
Value *Imag = ComplexVal.second;
Value *Zero =
Imag->getType()->isFPOrFPVectorTy()
? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
: llvm::Constant::getNullValue(Imag->getType());
Imag = Builder.CreateFSub(Zero, Imag, "sub");
return RValue::getComplex(std::make_pair(Real, Imag));
}
case Builtin::BI__builtin_creal:
case Builtin::BI__builtin_crealf:
case Builtin::BI__builtin_creall:
case Builtin::BIcreal:
case Builtin::BIcrealf:
case Builtin::BIcreall: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.first);
}
case Builtin::BI__builtin_dump_struct: {
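// Usage sketch: __builtin_dump_struct(&s, &printf) walks the fields of 's'
// via dumpRecord above, printing each field with the supplied printf-style
// callback.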
Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
QualType Arg0Type = Arg0->getType()->getPointeeType();
Value *RecordPtr = EmitScalarExpr(Arg0);
Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align, Func, 0);
return RValue::get(Res);
}
case Builtin::BI__builtin_cimag:
case Builtin::BI__builtin_cimagf:
case Builtin::BI__builtin_cimagl:
case Builtin::BIcimag:
case Builtin::BIcimagf:
case Builtin::BIcimagl: {
ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
return RValue::get(ComplexVal.second);
}
case Builtin::BI__builtin_ctzs:
case Builtin::BI__builtin_ctz:
case Builtin::BI__builtin_ctzl:
case Builtin::BI__builtin_ctzll: {
Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_clzs:
case Builtin::BI__builtin_clz:
case Builtin::BI__builtin_clzl:
case Builtin::BI__builtin_clzll: {
Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_ffs:
case Builtin::BI__builtin_ffsl:
case Builtin::BI__builtin_ffsll: {
// ffs(x) -> x ? cttz(x) + 1 : 0
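// E.g. ffs(12): 12 is 0b1100, cttz gives 2, so the result is 3; ffs(0)
// takes the zero arm of the select and yields 0.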
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp =
Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
llvm::ConstantInt::get(ArgType, 1));
Value *Zero = llvm::Constant::getNullValue(ArgType);
Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__builtin_parity:
case Builtin::BI__builtin_parityl:
case Builtin::BI__builtin_parityll: {
// parity(x) -> ctpop(x) & 1
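// E.g. parity(7): ctpop(0b111) is 3, and 3 & 1 == 1 (odd parity).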
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Tmp = Builder.CreateCall(F, ArgValue);
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI__popcnt16:
case Builtin::BI__popcnt:
case Builtin::BI__popcnt64:
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateCall(F, ArgValue);
if (Result->getType() != ResultType)
Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
"cast");
return RValue::get(Result);
}
case Builtin::BI_rotr8:
case Builtin::BI_rotr16:
case Builtin::BI_rotr:
case Builtin::BI_lrotr:
case Builtin::BI_rotr64: {
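// Branch-free rotate-right: both shift amounts are masked with
// (width - 1) so neither shift ever reaches the bit width. E.g. a 32-bit
// rotr by 40 uses 40 & 31 == 8 and -40 & 31 == 24, giving
// (Val >> 8) | (Val << 24).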
Value *Val = EmitScalarExpr(E->getArg(0));
Value *Shift = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Val->getType();
Shift = Builder.CreateIntCast(Shift, ArgType, false);
unsigned ArgWidth = ArgType->getIntegerBitWidth();
Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
Value *RightShiftAmt = Builder.CreateAnd(Shift, Mask);
Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
Value *LeftShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
return RValue::get(Result);
}
case Builtin::BI_rotl8:
case Builtin::BI_rotl16:
case Builtin::BI_rotl:
case Builtin::BI_lrotl:
case Builtin::BI_rotl64: {
Value *Val = EmitScalarExpr(E->getArg(0));
Value *Shift = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Val->getType();
Shift = Builder.CreateIntCast(Shift, ArgType, false);
unsigned ArgWidth = ArgType->getIntegerBitWidth();
Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
Value *LeftShiftAmt = Builder.CreateAnd(Shift, Mask);
Value *LeftShifted = Builder.CreateShl(Val, LeftShiftAmt);
Value *RightShiftAmt = Builder.CreateAnd(Builder.CreateNeg(Shift), Mask);
Value *RightShifted = Builder.CreateLShr(Val, RightShiftAmt);
Value *Result = Builder.CreateOr(LeftShifted, RightShifted);
return RValue::get(Result);
}
case Builtin::BI__builtin_unpredictable: {
// Always return the argument of __builtin_unpredictable. LLVM does not
// handle this builtin. Metadata for this builtin should be added directly
// to instructions such as branches or switches that use it.
return RValue::get(EmitScalarExpr(E->getArg(0)));
}
case Builtin::BI__builtin_expect: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
// Don't generate llvm.expect on -O0 as the backend won't use it for
// anything.
// Note, we still IRGen ExpectedValue because it could have side-effects.
if (CGM.getCodeGenOpts().OptimizationLevel == 0)
return RValue::get(ArgValue);
Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
Value *Result =
Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
return RValue::get(Result);
}
case Builtin::BI__builtin_assume_aligned: {
Value *PtrValue = EmitScalarExpr(E->getArg(0));
Value *OffsetValue =
(E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();
EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
return RValue::get(PtrValue);
}
case Builtin::BI__assume:
case Builtin::BI__builtin_assume: {
if (E->getArg(0)->HasSideEffects(getContext()))
return RValue::get(nullptr);
Value *ArgValue = EmitScalarExpr(E->getArg(0));
Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
}
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
}
case Builtin::BI__builtin_bitreverse8:
case Builtin::BI__builtin_bitreverse16:
case Builtin::BI__builtin_bitreverse32:
case Builtin::BI__builtin_bitreverse64: {
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
}
case Builtin::BI__builtin_object_size: {
unsigned Type =
E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
// We pass this builtin onto the optimizer so that it can figure out the
// object size in more complex cases.
return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
/*EmittedE=*/nullptr));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
// FIXME: Technically these constants should be of type 'int', yes?
RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
llvm::ConstantInt::get(Int32Ty, 0);
Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
llvm::ConstantInt::get(Int32Ty, 3);
Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
}
case Builtin::BI__builtin_readcyclecounter: {
Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin___clear_cache: {
Value *Begin = EmitScalarExpr(E->getArg(0));
Value *End = EmitScalarExpr(E->getArg(1));
Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
return RValue::get(Builder.CreateCall(F, {Begin, End}));
}
case Builtin::BI__builtin_trap:
return RValue::get(EmitTrapCall(Intrinsic::trap));
case Builtin::BI__debugbreak:
return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
case Builtin::BI__builtin_unreachable: {
EmitUnreachable(E->getExprLoc());
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("unreachable.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__builtin_powi:
case Builtin::BI__builtin_powif:
case Builtin::BI__builtin_powil: {
Value *Base = EmitScalarExpr(E->getArg(0));
Value *Exponent = EmitScalarExpr(E->getArg(1));
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
}
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
case Builtin::BI__builtin_islessequal:
case Builtin::BI__builtin_islessgreater:
case Builtin::BI__builtin_isunordered: {
// Ordered comparisons: we know the arguments to these are matching scalar
// floating point values.
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
switch (BuiltinID) {
default: llvm_unreachable("Unknown ordered comparison");
case Builtin::BI__builtin_isgreater:
LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isgreaterequal:
LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isless:
LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_islessequal:
LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_islessgreater:
LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
break;
case Builtin::BI__builtin_isunordered:
LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
break;
}
// ZExt bool to int type.
return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isnan: {
Value *V = EmitScalarExpr(E->getArg(0));
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BIfinite:
case Builtin::BI__finite:
case Builtin::BIfinitef:
case Builtin::BI__finitef:
case Builtin::BIfinitel:
case Builtin::BI__finitel:
case Builtin::BI__builtin_isinf:
case Builtin::BI__builtin_isfinite: {
// isinf(x) --> fabs(x) == infinity
// isfinite(x) --> fabs(x) != infinity
// NaN fails the ordered compare in either case.
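// E.g. isfinite(NaN): FCMP_ONE yields false for a NaN operand, so the
// builtin returns 0, as required.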
Value *V = EmitScalarExpr(E->getArg(0));
Value *Fabs = EmitFAbs(*this, V);
Constant *Infinity = ConstantFP::getInfinity(V->getType());
CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
? CmpInst::FCMP_OEQ
: CmpInst::FCMP_ONE;
Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isinf_sign: {
// isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
Value *Arg = EmitScalarExpr(E->getArg(0));
Value *AbsArg = EmitFAbs(*this, Arg);
Value *IsInf = Builder.CreateFCmpOEQ(
AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
Value *IsNeg = EmitSignBit(*this, Arg);
llvm::Type *IntTy = ConvertType(E->getType());
Value *Zero = Constant::getNullValue(IntTy);
Value *One = ConstantInt::get(IntTy, 1);
Value *NegativeOne = ConstantInt::get(IntTy, -1);
Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
return RValue::get(Result);
}
case Builtin::BI__builtin_isnormal: {
// isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
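// E.g. for 'float', Smallest is FLT_MIN = 2^-126 (about 1.1755e-38), so a
// subnormal such as 1e-45f fails the final >= compare and isnormal
// returns 0.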
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
Value *Abs = EmitFAbs(*this, V);
Value *IsLessThanInf =
Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
APFloat Smallest = APFloat::getSmallestNormalized(
getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
Value *IsNormal =
Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
"isnormal");
V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
V = Builder.CreateAnd(V, IsNormal, "and");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_fpclassify: {
Value *V = EmitScalarExpr(E->getArg(5));
llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
// Create Result
BasicBlock *Begin = Builder.GetInsertBlock();
BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
Builder.SetInsertPoint(End);
PHINode *Result =
Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
"fpclassify_result");
// if (V==0) return FP_ZERO
Builder.SetInsertPoint(Begin);
Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
"iszero");
Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
Builder.CreateCondBr(IsZero, End, NotZero);
Result->addIncoming(ZeroLiteral, Begin);
// if (V != V) return FP_NAN
Builder.SetInsertPoint(NotZero);
Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
Value *NanLiteral = EmitScalarExpr(E->getArg(0));
BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
Builder.CreateCondBr(IsNan, End, NotNan);
Result->addIncoming(NanLiteral, NotZero);
// if (fabs(V) == infinity) return FP_INFINITY
Builder.SetInsertPoint(NotNan);
Value *VAbs = EmitFAbs(*this, V);
Value *IsInf =
Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
"isinf");
Value *InfLiteral = EmitScalarExpr(E->getArg(1));
BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
Builder.CreateCondBr(IsInf, End, NotInf);
Result->addIncoming(InfLiteral, NotNan);
// if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
Builder.SetInsertPoint(NotInf);
APFloat Smallest = APFloat::getSmallestNormalized(
getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
Value *IsNormal =
Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
"isnormal");
Value *NormalResult =
Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(3)));
Builder.CreateBr(End);
Result->addIncoming(NormalResult, NotInf);
// return Result
Builder.SetInsertPoint(End);
return RValue::get(Result);
}
case Builtin::BIalloca:
case Builtin::BI_alloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
const TargetInfo &TI = getContext().getTargetInfo();
// The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
unsigned SuitableAlignmentInBytes =
CGM.getContext()
.toCharUnitsFromBits(TI.getSuitableAlign())
.getQuantity();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(SuitableAlignmentInBytes);
return RValue::get(AI);
}
case Builtin::BI__builtin_alloca_with_align: {
Value *Size = EmitScalarExpr(E->getArg(0));
Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
unsigned AlignmentInBytes =
CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity();
AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
AI->setAlignment(AlignmentInBytes);
return RValue::get(AI);
}
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *SizeVal = EmitScalarExpr(E->getArg(1));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
return RValue::get(nullptr);
}
case Builtin::BImemcpy:
case Builtin::BI__builtin_memcpy: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_char_memchr:
BuiltinID = Builtin::BI__builtin_memchr;
break;
case Builtin::BI__builtin___memcpy_chk: {
// fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
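// E.g. __builtin___memcpy_chk(d, s, 16, 32) becomes a plain 16-byte
// memcpy, since the copy provably fits in the destination.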
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_objc_memmove_collectable: {
Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
DestAddr, SrcAddr, SizeVal);
return RValue::get(DestAddr.getPointer());
}
case Builtin::BI__builtin___memmove_chk: {
// fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BImemmove:
case Builtin::BI__builtin_memmove: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
E->getArg(1)->getExprLoc(), FD, 1);
Builder.CreateMemMove(Dest, Src, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BImemset:
case Builtin::BI__builtin_memset: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = EmitScalarExpr(E->getArg(2));
EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
llvm::APSInt Size, DstSize;
if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
!E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
break;
if (Size.ugt(DstSize))
break;
Address Dest = EmitPointerWithAlignment(E->getArg(0));
Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
Builder.getInt8Ty());
Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
case Builtin::BI__builtin_wmemcmp: {
// The MSVC runtime library does not provide a definition of wmemcmp, so we
// need an inline implementation.
if (!getTarget().getTriple().isOSMSVCRT())
break;
llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
Value *Dst = EmitScalarExpr(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Size = EmitScalarExpr(E->getArg(2));
BasicBlock *Entry = Builder.GetInsertBlock();
BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
BasicBlock *Next = createBasicBlock("wmemcmp.next");
BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
EmitBlock(CmpGT);
PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
DstPhi->addIncoming(Dst, Entry);
PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
SrcPhi->addIncoming(Src, Entry);
PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
SizePhi->addIncoming(Size, Entry);
CharUnits WCharAlign =
getContext().getTypeAlignInChars(getContext().WCharTy);
Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
EmitBlock(CmpLT);
Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
Builder.CreateCondBr(DstLtSrc, Exit, Next);
EmitBlock(Next);
Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
Value *NextSizeEq0 =
Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
DstPhi->addIncoming(NextDst, Next);
SrcPhi->addIncoming(NextSrc, Next);
SizePhi->addIncoming(NextSize, Next);
EmitBlock(Exit);
PHINode *Ret = Builder.CreatePHI(IntTy, 4);
Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
return RValue::get(Ret);
}
case Builtin::BI__builtin_dwarf_cfa: {
// The offset in bytes from the first argument to the CFA.
//
// Why on earth is this in the frontend? Is there any reason at
// all that the backend can't reasonably determine this while
// lowering llvm.eh.dwarf.cfa()?
//
// TODO: If there's a satisfactory reason, add a target hook for
// this instead of hard-coding 0, which is correct for most targets.
int32_t Offset = 0;
Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
return RValue::get(Builder.CreateCall(F,
llvm::ConstantInt::get(Int32Ty, Offset)));
}
case Builtin::BI__builtin_return_address: {
Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
getContext().UnsignedIntTy);
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI_ReturnAddress: {
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
}
case Builtin::BI__builtin_frame_address: {
Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
getContext().UnsignedIntTy);
Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_extract_return_addr: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
return RValue::get(Result);
}
case Builtin::BI__builtin_frob_return_addr: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
return RValue::get(Result);
}
case Builtin::BI__builtin_dwarf_sp_column: {
llvm::IntegerType *Ty
= cast<llvm::IntegerType>(ConvertType(E->getType()));
int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
if (Column == -1) {
CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
return RValue::get(llvm::UndefValue::get(Ty));
}
return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
}
case Builtin::BI__builtin_init_dwarf_reg_size_table: {
Value *Address = EmitScalarExpr(E->getArg(0));
if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
}
case Builtin::BI__builtin_eh_return: {
Value *Int = EmitScalarExpr(E->getArg(0));
Value *Ptr = EmitScalarExpr(E->getArg(1));
llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
"LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
? Intrinsic::eh_return_i32
: Intrinsic::eh_return_i64);
Builder.CreateCall(F, {Int, Ptr});
Builder.CreateUnreachable();
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("builtin_eh_return.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__builtin_unwind_init: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_extend_pointer: {
// Extends a pointer to the size of an _Unwind_Word, which is
// uint64_t on all platforms. Generally this gets poked into a
// register and eventually used as an address, so if the
// addressing registers are wider than pointers and the platform
// doesn't implicitly ignore high-order bits when doing
// addressing, we need to make sure we zext / sext based on
// the platform's expectations.
//
// See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
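// E.g. on a hypothetical 32-bit target with zext semantics, pointer
// 0x80000000 becomes the 64-bit _Unwind_Word 0x0000000080000000.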
// Cast the pointer to intptr_t.
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
// If that's 64 bits, we're done.
if (IntPtrTy->getBitWidth() == 64)
return RValue::get(Result);
// Otherwise, ask the codegen data what to do.
if (getTargetHooks().extendPointerWithSExt())
return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
else
return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
}
case Builtin::BI__builtin_setjmp: {
// Buffer is a void**.
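// Slot layout expected by the LLVM EH setjmp/longjmp intrinsics: buf[0]
// holds the frame pointer, buf[1] is filled in with the resume address by
// llvm.eh.sjlj.setjmp itself, and buf[2] holds the saved stack pointer.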
Address Buf = EmitPointerWithAlignment(E->getArg(0));
// Store the frame pointer to the setjmp buffer.
Value *FrameAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
ConstantInt::get(Int32Ty, 0));
Builder.CreateStore(FrameAddr, Buf);
// Store the stack pointer to the setjmp buffer.
Value *StackAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
Address StackSaveSlot =
Builder.CreateConstInBoundsGEP(Buf, 2, getPointerSize());
Builder.CreateStore(StackAddr, StackSaveSlot);
// Call LLVM's EH setjmp, which is lightweight.
Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
case Builtin::BI__builtin_longjmp: {
Value *Buf = EmitScalarExpr(E->getArg(0));
Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
// Call LLVM's EH longjmp, which is lightweight.
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
// longjmp doesn't return; mark this as unreachable.
Builder.CreateUnreachable();
// We do need to preserve an insertion point.
EmitBlock(createBasicBlock("longjmp.cont"));
return RValue::get(nullptr);
}
case Builtin::BI__sync_fetch_and_add:
case Builtin::BI__sync_fetch_and_sub:
case Builtin::BI__sync_fetch_and_or:
case Builtin::BI__sync_fetch_and_and:
case Builtin::BI__sync_fetch_and_xor:
case Builtin::BI__sync_fetch_and_nand:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_sub_and_fetch:
case Builtin::BI__sync_and_and_fetch:
case Builtin::BI__sync_or_and_fetch:
case Builtin::BI__sync_xor_and_fetch:
case Builtin::BI__sync_nand_and_fetch:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
case Builtin::BI__sync_lock_release:
case Builtin::BI__sync_swap:
llvm_unreachable("Shouldn't make it through sema");
case Builtin::BI__sync_fetch_and_add_1:
case Builtin::BI__sync_fetch_and_add_2:
case Builtin::BI__sync_fetch_and_add_4:
case Builtin::BI__sync_fetch_and_add_8:
case Builtin::BI__sync_fetch_and_add_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
case Builtin::BI__sync_fetch_and_sub_1:
case Builtin::BI__sync_fetch_and_sub_2:
case Builtin::BI__sync_fetch_and_sub_4:
case Builtin::BI__sync_fetch_and_sub_8:
case Builtin::BI__sync_fetch_and_sub_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
case Builtin::BI__sync_fetch_and_or_1:
case Builtin::BI__sync_fetch_and_or_2:
case Builtin::BI__sync_fetch_and_or_4:
case Builtin::BI__sync_fetch_and_or_8:
case Builtin::BI__sync_fetch_and_or_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
case Builtin::BI__sync_fetch_and_and_1:
case Builtin::BI__sync_fetch_and_and_2:
case Builtin::BI__sync_fetch_and_and_4:
case Builtin::BI__sync_fetch_and_and_8:
case Builtin::BI__sync_fetch_and_and_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
case Builtin::BI__sync_fetch_and_xor_1:
case Builtin::BI__sync_fetch_and_xor_2:
case Builtin::BI__sync_fetch_and_xor_4:
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
case Builtin::BI__sync_fetch_and_nand_1:
case Builtin::BI__sync_fetch_and_nand_2:
case Builtin::BI__sync_fetch_and_nand_4:
case Builtin::BI__sync_fetch_and_nand_8:
case Builtin::BI__sync_fetch_and_nand_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
case Builtin::BI__sync_fetch_and_max:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
case Builtin::BI__sync_fetch_and_umin:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
case Builtin::BI__sync_fetch_and_umax:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
case Builtin::BI__sync_add_and_fetch_1:
case Builtin::BI__sync_add_and_fetch_2:
case Builtin::BI__sync_add_and_fetch_4:
case Builtin::BI__sync_add_and_fetch_8:
case Builtin::BI__sync_add_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
llvm::Instruction::Add);
case Builtin::BI__sync_sub_and_fetch_1:
case Builtin::BI__sync_sub_and_fetch_2:
case Builtin::BI__sync_sub_and_fetch_4:
case Builtin::BI__sync_sub_and_fetch_8:
case Builtin::BI__sync_sub_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
llvm::Instruction::Sub);
case Builtin::BI__sync_and_and_fetch_1:
case Builtin::BI__sync_and_and_fetch_2:
case Builtin::BI__sync_and_and_fetch_4:
case Builtin::BI__sync_and_and_fetch_8:
case Builtin::BI__sync_and_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
llvm::Instruction::And);
case Builtin::BI__sync_or_and_fetch_1:
case Builtin::BI__sync_or_and_fetch_2:
case Builtin::BI__sync_or_and_fetch_4:
case Builtin::BI__sync_or_and_fetch_8:
case Builtin::BI__sync_or_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
llvm::Instruction::Or);
case Builtin::BI__sync_xor_and_fetch_1:
case Builtin::BI__sync_xor_and_fetch_2:
case Builtin::BI__sync_xor_and_fetch_4:
case Builtin::BI__sync_xor_and_fetch_8:
case Builtin::BI__sync_xor_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
llvm::Instruction::Xor);
case Builtin::BI__sync_nand_and_fetch_1:
case Builtin::BI__sync_nand_and_fetch_2:
case Builtin::BI__sync_nand_and_fetch_4:
case Builtin::BI__sync_nand_and_fetch_8:
case Builtin::BI__sync_nand_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
llvm::Instruction::And, true);
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
case Builtin::BI__sync_val_compare_and_swap_4:
case Builtin::BI__sync_val_compare_and_swap_8:
case Builtin::BI__sync_val_compare_and_swap_16:
return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
case Builtin::BI__sync_bool_compare_and_swap_1:
case Builtin::BI__sync_bool_compare_and_swap_2:
case Builtin::BI__sync_bool_compare_and_swap_4:
case Builtin::BI__sync_bool_compare_and_swap_8:
case Builtin::BI__sync_bool_compare_and_swap_16:
return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
case Builtin::BI__sync_swap_1:
case Builtin::BI__sync_swap_2:
case Builtin::BI__sync_swap_4:
case Builtin::BI__sync_swap_8:
case Builtin::BI__sync_swap_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_test_and_set_1:
case Builtin::BI__sync_lock_test_and_set_2:
case Builtin::BI__sync_lock_test_and_set_4:
case Builtin::BI__sync_lock_test_and_set_8:
case Builtin::BI__sync_lock_test_and_set_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
case Builtin::BI__sync_lock_release_1:
case Builtin::BI__sync_lock_release_2:
case Builtin::BI__sync_lock_release_4:
case Builtin::BI__sync_lock_release_8:
case Builtin::BI__sync_lock_release_16: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
StoreSize);
Store->setAtomic(llvm::AtomicOrdering::Release);
return RValue::get(nullptr);
}
case Builtin::BI__sync_synchronize: {
// We assume this is supposed to correspond to a C++0x-style
// sequentially-consistent fence (i.e. this is only usable for
// synchronization, not device I/O or anything like that). This intrinsic
// is really badly designed in the sense that in theory, there isn't
// any way to safely use it... but in practice, it mostly works
// to use it with non-atomic loads and stores to get acquire/release
// semantics.
Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
return RValue::get(nullptr);
}
case Builtin::BI__builtin_nontemporal_load:
return RValue::get(EmitNontemporalLoad(*this, E));
case Builtin::BI__builtin_nontemporal_store:
return RValue::get(EmitNontemporalStore(*this, E));
case Builtin::BI__c11_atomic_is_lock_free:
case Builtin::BI__atomic_is_lock_free: {
// Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
// __c11 builtin, ptr is 0 (indicating a properly aligned object), since
// _Atomic(T) is always properly aligned.
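// E.g. __c11_atomic_is_lock_free(4) lowers to the libcall
// __atomic_is_lock_free(4, (void *)0), letting the runtime decide.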
const char *LibCallName = "__atomic_is_lock_free";
CallArgList Args;
Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
getContext().getSizeType());
if (BuiltinID == Builtin::BI__atomic_is_lock_free)
Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
getContext().VoidPtrTy);
else
Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
getContext().VoidPtrTy);
const CGFunctionInfo &FuncInfo =
CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
return EmitCall(FuncInfo, CGCallee::forDirect(Func),
ReturnValueSlot(), Args);
}
case Builtin::BI__atomic_test_and_set: {
// Look at the argument type to determine whether this is a volatile
// operation. The parameter type is always volatile.
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Value *Ptr = EmitScalarExpr(E->getArg(0));
unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(1);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
AtomicRMWInst *Result = nullptr;
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::Monotonic);
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::Acquire);
break;
case 3: // memory_order_release
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::Release);
break;
case 4: // memory_order_acq_rel
Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::AcquireRelease);
break;
case 5: // memory_order_seq_cst
Result = Builder.CreateAtomicRMW(
llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
llvm::AtomicOrdering::SequentiallyConsistent);
break;
}
Result->setVolatile(Volatile);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[5] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("acquire", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("acqrel", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[5] = {
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
llvm::AtomicOrdering::SequentiallyConsistent};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
Builder.SetInsertPoint(ContBB);
PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
for (unsigned i = 0; i < 5; ++i) {
Builder.SetInsertPoint(BBs[i]);
AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
Ptr, NewVal, Orders[i]);
RMW->setVolatile(Volatile);
Result->addIncoming(RMW, BBs[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(1), BBs[1]);
SI->addCase(Builder.getInt32(2), BBs[1]);
SI->addCase(Builder.getInt32(3), BBs[2]);
SI->addCase(Builder.getInt32(4), BBs[3]);
SI->addCase(Builder.getInt32(5), BBs[4]);
Builder.SetInsertPoint(ContBB);
return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
}
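// __atomic_clear lowers to an atomic store of zero rather than an atomicrmw;
// only the relaxed, release, and seq_cst orderings are meaningful for it.
// Illustrative sketch (assumed IR shape, not verbatim compiler output):
//   __atomic_clear(p, __ATOMIC_RELEASE);
// becomes roughly:
//   store atomic i8 0, i8* %p release, align 1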
case Builtin::BI__atomic_clear: {
QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
bool Volatile =
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Address Ptr = EmitPointerWithAlignment(E->getArg(0));
unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
Store->setOrdering(llvm::AtomicOrdering::Monotonic);
break;
case 3: // memory_order_release
Store->setOrdering(llvm::AtomicOrdering::Release);
break;
case 5: // memory_order_seq_cst
Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
break;
}
return RValue::get(nullptr);
}
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
llvm::BasicBlock *BBs[3] = {
createBasicBlock("monotonic", CurFn),
createBasicBlock("release", CurFn),
createBasicBlock("seqcst", CurFn)
};
llvm::AtomicOrdering Orders[3] = {
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
llvm::AtomicOrdering::SequentiallyConsistent};
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
for (unsigned i = 0; i < 3; ++i) {
Builder.SetInsertPoint(BBs[i]);
StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
Store->setOrdering(Orders[i]);
Builder.CreateBr(ContBB);
}
SI->addCase(Builder.getInt32(0), BBs[0]);
SI->addCase(Builder.getInt32(3), BBs[1]);
SI->addCase(Builder.getInt32(5), BBs[2]);
Builder.SetInsertPoint(ContBB);
return RValue::get(nullptr);
}
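// For the fence builtins, the signal (compiler-only) variants differ from
// the thread variants only in the synchronization scope. Sketch (assumed IR):
//   __atomic_thread_fence(__ATOMIC_SEQ_CST);  ->  fence seq_cst
//   __atomic_signal_fence(__ATOMIC_SEQ_CST);  ->  fence syncscope("singlethread") seq_cst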
case Builtin::BI__atomic_thread_fence:
case Builtin::BI__atomic_signal_fence:
case Builtin::BI__c11_atomic_thread_fence:
case Builtin::BI__c11_atomic_signal_fence: {
llvm::SyncScope::ID SSID;
if (BuiltinID == Builtin::BI__atomic_signal_fence ||
BuiltinID == Builtin::BI__c11_atomic_signal_fence)
SSID = llvm::SyncScope::SingleThread;
else
SSID = llvm::SyncScope::System;
Value *Order = EmitScalarExpr(E->getArg(0));
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
switch (ord) {
case 0: // memory_order_relaxed
default: // invalid order
break;
case 1: // memory_order_consume
case 2: // memory_order_acquire
Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
break;
case 3: // memory_order_release
Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
break;
case 4: // memory_order_acq_rel
Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
break;
case 5: // memory_order_seq_cst
Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
break;
}
return RValue::get(nullptr);
}
llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
AcquireBB = createBasicBlock("acquire", CurFn);
ReleaseBB = createBasicBlock("release", CurFn);
AcqRelBB = createBasicBlock("acqrel", CurFn);
SeqCstBB = createBasicBlock("seqcst", CurFn);
llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
Builder.SetInsertPoint(AcquireBB);
Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(1), AcquireBB);
SI->addCase(Builder.getInt32(2), AcquireBB);
Builder.SetInsertPoint(ReleaseBB);
Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(3), ReleaseBB);
Builder.SetInsertPoint(AcqRelBB);
Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(4), AcqRelBB);
Builder.SetInsertPoint(SeqCstBB);
Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
Builder.CreateBr(ContBB);
SI->addCase(Builder.getInt32(5), SeqCstBB);
Builder.SetInsertPoint(ContBB);
return RValue::get(nullptr);
}
case Builtin::BI__builtin_signbit:
case Builtin::BI__builtin_signbitf:
case Builtin::BI__builtin_signbitl: {
return RValue::get(
Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
ConvertType(E->getType())));
}
case Builtin::BI__annotation: {
// Re-encode each wide string to UTF8 and make an MDString.
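// Sketch (assumed IR shape): __annotation(L"mark") emits
//   call void @llvm.codeview.annotation(metadata !{!"mark"})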
SmallVector<Metadata *, 1> Strings;
for (const Expr *Arg : E->arguments()) {
const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
assert(Str->getCharByteWidth() == 2);
StringRef WideBytes = Str->getBytes();
std::string StrUtf8;
if (!convertUTF16ToUTF8String(
makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
continue;
}
Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
}
// Build an MDTuple of MDStrings and emit the intrinsic call.
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
return RValue::getIgnored();
}
case Builtin::BI__builtin_annotation: {
llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
AnnVal->getType());
// Get the annotation string, looking through casts. Sema requires this to be
// a non-wide string literal, potentially wrapped in casts, so the cast<> is
// safe.
const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
}
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll: {
// We translate all of these builtins from expressions of the form:
// int x = ..., y = ..., carryin = ..., carryout, result;
// result = __builtin_addc(x, y, carryin, &carryout);
//
// to LLVM IR of the form:
//
// %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
// %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
// %carry1 = extractvalue {i32, i1} %tmp1, 1
// %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
// i32 %carryin)
// %result = extractvalue {i32, i1} %tmp2, 0
// %carry2 = extractvalue {i32, i1} %tmp2, 1
// %tmp3 = or i1 %carry1, %carry2
// %tmp4 = zext i1 %tmp3 to i32
// store i32 %tmp4, i32* %carryout
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
// Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown multiprecision builtin id.");
case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
case Builtin::BI__builtin_subcll:
IntrinsicId = llvm::Intrinsic::usub_with_overflow;
break;
}
// Construct our resulting LLVM IR expression.
llvm::Value *Carry1;
llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
X, Y, Carry1);
llvm::Value *Carry2;
llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
Sum1, Carryin, Carry2);
llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
X->getType());
Builder.CreateStore(CarryOut, CarryOutPtr);
return RValue::get(Sum2);
}
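// For the generic __builtin_{add,sub,mul}_overflow forms the two operand
// types and the result type may all differ, so the arithmetic is performed
// in a single "encompassing" integer type wide enough to hold every input
// and the result exactly. Illustrative sketch (hypothetical 32/32/64-bit
// types):
//   int a; unsigned b; long r;
//   bool ovf = __builtin_add_overflow(a, b, &r);
// is evaluated as an i64 sadd.with.overflow (a signed 64-bit type
// encompasses signed 32-bit, unsigned 32-bit, and signed 64-bit values);
// when the result type is narrower than the encompassing type, an extra
// truncation check is OR'd into the overflow flag, as below.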
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow: {
const clang::Expr *LeftArg = E->getArg(0);
const clang::Expr *RightArg = E->getArg(1);
const clang::Expr *ResultArg = E->getArg(2);
clang::QualType ResultQTy =
ResultArg->getType()->castAs<PointerType>()->getPointeeType();
WidthAndSignedness LeftInfo =
getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
WidthAndSignedness RightInfo =
getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
WidthAndSignedness ResultInfo =
getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
// Handle mixed-sign multiplication as a special case, because adding
// runtime or backend support for our generic irgen would be too expensive.
if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
RightInfo, ResultArg, ResultQTy,
ResultInfo);
WidthAndSignedness EncompassingInfo =
EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
llvm::Type *EncompassingLLVMTy =
llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default:
llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_add_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::sadd_with_overflow
: llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_sub_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::ssub_with_overflow
: llvm::Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_mul_overflow:
IntrinsicId = EncompassingInfo.Signed
? llvm::Intrinsic::smul_with_overflow
: llvm::Intrinsic::umul_with_overflow;
break;
}
llvm::Value *Left = EmitScalarExpr(LeftArg);
llvm::Value *Right = EmitScalarExpr(RightArg);
Address ResultPtr = EmitPointerWithAlignment(ResultArg);
// Extend each operand to the encompassing type.
Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
// Perform the operation on the extended values.
llvm::Value *Overflow, *Result;
Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
if (EncompassingInfo.Width > ResultInfo.Width) {
// The encompassing type is wider than the result type, so we need to
// truncate it.
llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
// To see if the truncation caused an overflow, we will extend
// the result and then compare it to the original result.
llvm::Value *ResultTruncExt = Builder.CreateIntCast(
ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
llvm::Value *TruncationOverflow =
Builder.CreateICmpNE(Result, ResultTruncExt);
Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
Result = ResultTrunc;
}
// Finally, store the result using the pointer.
bool isVolatile =
ResultArg->getType()->getPointeeType().isVolatileQualified();
Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
return RValue::get(Overflow);
}
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow: {
// We translate all of these builtins directly to the relevant llvm IR node.
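// Sketch (assumed IR, following the addc/subc example above):
//   bool c = __builtin_uadd_overflow(x, y, &sum);
// becomes:
//   %t   = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum = extractvalue {i32, i1} %t, 0   ; stored through the out-pointer
//   %c   = extractvalue {i32, i1} %t, 1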
// Scalarize our inputs.
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
// Decide which of the overflow intrinsics we are lowering to:
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown overflow builtin id.");
case Builtin::BI__builtin_uadd_overflow:
case Builtin::BI__builtin_uaddl_overflow:
case Builtin::BI__builtin_uaddll_overflow:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
case Builtin::BI__builtin_usub_overflow:
case Builtin::BI__builtin_usubl_overflow:
case Builtin::BI__builtin_usubll_overflow:
IntrinsicId = llvm::Intrinsic::usub_with_overflow;
break;
case Builtin::BI__builtin_umul_overflow:
case Builtin::BI__builtin_umull_overflow:
case Builtin::BI__builtin_umulll_overflow:
IntrinsicId = llvm::Intrinsic::umul_with_overflow;
break;
case Builtin::BI__builtin_sadd_overflow:
case Builtin::BI__builtin_saddl_overflow:
case Builtin::BI__builtin_saddll_overflow:
IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
break;
case Builtin::BI__builtin_ssub_overflow:
case Builtin::BI__builtin_ssubl_overflow:
case Builtin::BI__builtin_ssubll_overflow:
IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
break;
case Builtin::BI__builtin_smul_overflow:
case Builtin::BI__builtin_smull_overflow:
case Builtin::BI__builtin_smulll_overflow:
IntrinsicId = llvm::Intrinsic::smul_with_overflow;
break;
}
llvm::Value *Carry;
llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
Builder.CreateStore(Sum, SumOutPtr);
return RValue::get(Carry);
}
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getPointer());
case Builtin::BI__builtin_operator_new:
return EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
case Builtin::BI__builtin_operator_delete:
return EmitBuiltinNewDeleteCall(
E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
case Builtin::BI__noop:
// __noop always evaluates to an integer literal zero.
return RValue::get(ConstantInt::get(IntTy, 0));
case Builtin::BI__builtin_call_with_static_chain: {
const CallExpr *Call = cast<CallExpr>(E->getArg(0));
const Expr *Chain = E->getArg(1);
return EmitCall(Call->getCallee()->getType(),
EmitCallee(Call->getCallee()), Call, ReturnValue,
EmitScalarExpr(Chain));
}
case Builtin::BI_InterlockedExchange8:
case Builtin::BI_InterlockedExchange16:
case Builtin::BI_InterlockedExchange:
case Builtin::BI_InterlockedExchangePointer:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
case Builtin::BI_InterlockedCompareExchangePointer: {
llvm::Type *RTy;
llvm::IntegerType *IntType =
IntegerType::get(getLLVMContext(),
getContext().getTypeSize(E->getType()));
llvm::Type *IntPtrType = IntType->getPointerTo();
llvm::Value *Destination =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
RTy = Exchange->getType();
Exchange = Builder.CreatePtrToInt(Exchange, IntType);
llvm::Value *Comparand =
Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
auto Result =
Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
AtomicOrdering::SequentiallyConsistent,
AtomicOrdering::SequentiallyConsistent);
Result->setVolatile(true);
return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
0),
RTy));
}
case Builtin::BI_InterlockedCompareExchange8:
case Builtin::BI_InterlockedCompareExchange16:
case Builtin::BI_InterlockedCompareExchange:
case Builtin::BI_InterlockedCompareExchange64: {
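// MSVC's _InterlockedCompareExchange takes (Destination, Exchange, Comparand),
// while LLVM's cmpxchg takes (pointer, compare value, new value), so the
// second and third source arguments are deliberately swapped below.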
AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(1)),
AtomicOrdering::SequentiallyConsistent,
AtomicOrdering::SequentiallyConsistent);
CXI->setVolatile(true);
return RValue::get(Builder.CreateExtractValue(CXI, 0));
}
case Builtin::BI_InterlockedIncrement16:
case Builtin::BI_InterlockedIncrement:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
case Builtin::BI_InterlockedDecrement16:
case Builtin::BI_InterlockedDecrement:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
case Builtin::BI_InterlockedAnd8:
case Builtin::BI_InterlockedAnd16:
case Builtin::BI_InterlockedAnd:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
case Builtin::BI_InterlockedExchangeAdd8:
case Builtin::BI_InterlockedExchangeAdd16:
case Builtin::BI_InterlockedExchangeAdd:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
case Builtin::BI_InterlockedExchangeSub8:
case Builtin::BI_InterlockedExchangeSub16:
case Builtin::BI_InterlockedExchangeSub:
return RValue::get(
EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
case Builtin::BI_InterlockedOr8:
case Builtin::BI_InterlockedOr16:
case Builtin::BI_InterlockedOr:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
case Builtin::BI_InterlockedXor8:
case Builtin::BI_InterlockedXor16:
case Builtin::BI_InterlockedXor:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
case Builtin::BI_bittest64:
case Builtin::BI_bittest:
case Builtin::BI_bittestandcomplement64:
case Builtin::BI_bittestandcomplement:
case Builtin::BI_bittestandreset64:
case Builtin::BI_bittestandreset:
case Builtin::BI_bittestandset64:
case Builtin::BI_bittestandset:
case Builtin::BI_interlockedbittestandreset:
case Builtin::BI_interlockedbittestandreset64:
case Builtin::BI_interlockedbittestandset64:
case Builtin::BI_interlockedbittestandset:
case Builtin::BI_interlockedbittestandset_acq:
case Builtin::BI_interlockedbittestandset_rel:
case Builtin::BI_interlockedbittestandset_nf:
case Builtin::BI_interlockedbittestandreset_acq:
case Builtin::BI_interlockedbittestandreset_rel:
case Builtin::BI_interlockedbittestandreset_nf:
return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
return RValue::get(EmitSEHExceptionCode());
case Builtin::BI__exception_info:
case Builtin::BI_exception_info:
return RValue::get(EmitSEHExceptionInfo());
case Builtin::BI__abnormal_termination:
case Builtin::BI_abnormal_termination:
return RValue::get(EmitSEHAbnormalTermination());
case Builtin::BI_setjmpex:
if (getTarget().getTriple().isOSMSVCRT())
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
break;
case Builtin::BI_setjmp:
if (getTarget().getTriple().isOSMSVCRT()) {
if (getTarget().getTriple().getArch() == llvm::Triple::x86)
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
}
break;
case Builtin::BI__GetExceptionInfo: {
if (llvm::GlobalVariable *GV =
CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
break;
}
case Builtin::BI__fastfail:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
case Builtin::BI__builtin_coro_size: {
auto &Context = getContext();
auto SizeTy = Context.getSizeType();
auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
Value *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_coro_id:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
case Builtin::BI__builtin_coro_promise:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
case Builtin::BI__builtin_coro_resume:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
case Builtin::BI__builtin_coro_frame:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
case Builtin::BI__builtin_coro_noop:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
case Builtin::BI__builtin_coro_free:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
case Builtin::BI__builtin_coro_destroy:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
case Builtin::BI__builtin_coro_done:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
case Builtin::BI__builtin_coro_alloc:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
case Builtin::BI__builtin_coro_begin:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
case Builtin::BI__builtin_coro_end:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
case Builtin::BI__builtin_coro_suspend:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
case Builtin::BI__builtin_coro_param:
return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
// OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe: {
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Type of the generic packet parameter.
unsigned GenericAS =
getContext().getTargetAddressSpace(LangAS::opencl_generic);
llvm::Type *I8PTy = llvm::PointerType::get(
llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
// Determine which overloaded version we should generate the call for.
if (2U == E->getNumArgs()) {
const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
: "__write_pipe_2";
// Create a generic function type so the call works with any builtin or
// user-defined type.
llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
return RValue::get(
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, BCast, PacketSize, PacketAlign}));
} else {
assert(4 == E->getNumArgs() &&
"Illegal number of parameters to pipe function");
const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
: "__write_pipe_4";
llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
Int32Ty, Int32Ty};
Value *Arg2 = EmitScalarExpr(E->getArg(2)),
*Arg3 = EmitScalarExpr(E->getArg(3));
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
// We know the third argument is an integer type, but we may need to cast
// it to i32.
if (Arg2->getType() != Int32Ty)
Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
return RValue::get(Builder.CreateCall(
CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
}
}
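// Sketch of the 2-argument form (hypothetical OpenCL source, pseudo-IR; the
// packet size and alignment come from the CGOpenCLRuntime helpers above):
//   read_pipe(p, &val);
// becomes roughly:
//   %r = call i32 @__read_pipe_2(%p, <&val as generic i8*>,
//                                i32 <packet size>, i32 <packet align>)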
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
// functions
case Builtin::BIreserve_read_pipe:
case Builtin::BIreserve_write_pipe:
case Builtin::BIwork_group_reserve_read_pipe:
case Builtin::BIwork_group_reserve_write_pipe:
case Builtin::BIsub_group_reserve_read_pipe:
case Builtin::BIsub_group_reserve_write_pipe: {
// Composing the mangled name for the function.
const char *Name;
if (BuiltinID == Builtin::BIreserve_read_pipe)
Name = "__reserve_read_pipe";
else if (BuiltinID == Builtin::BIreserve_write_pipe)
Name = "__reserve_write_pipe";
else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
Name = "__work_group_reserve_read_pipe";
else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
Name = "__work_group_reserve_write_pipe";
else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
Name = "__sub_group_reserve_read_pipe";
else
Name = "__sub_group_reserve_write_pipe";
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
// We know the second argument is an integer type, but we may need to cast
// it to i32.
if (Arg1->getType() != Int32Ty)
Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
return RValue::get(
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
// functions
case Builtin::BIcommit_read_pipe:
case Builtin::BIcommit_write_pipe:
case Builtin::BIwork_group_commit_read_pipe:
case Builtin::BIwork_group_commit_write_pipe:
case Builtin::BIsub_group_commit_read_pipe:
case Builtin::BIsub_group_commit_write_pipe: {
const char *Name;
if (BuiltinID == Builtin::BIcommit_read_pipe)
Name = "__commit_read_pipe";
else if (BuiltinID == Builtin::BIcommit_write_pipe)
Name = "__commit_write_pipe";
else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
Name = "__work_group_commit_read_pipe";
else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
Name = "__work_group_commit_write_pipe";
else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
Name = "__sub_group_commit_read_pipe";
else
Name = "__sub_group_commit_write_pipe";
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
case Builtin::BIget_pipe_num_packets:
case Builtin::BIget_pipe_max_packets: {
const char *BaseName;
const PipeType *PipeTy = E->getArg(0)->getType()->getAs<PipeType>();
if (BuiltinID == Builtin::BIget_pipe_num_packets)
BaseName = "__get_pipe_num_packets";
else
BaseName = "__get_pipe_max_packets";
auto Name = std::string(BaseName) +
std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
// Building the generic function prototype.
Value *Arg0 = EmitScalarExpr(E->getArg(0));
CGOpenCLRuntime OpenCLRT(CGM);
Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
{Arg0, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
case Builtin::BIto_global:
case Builtin::BIto_local:
case Builtin::BIto_private: {
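// These builtins convert a generic-address-space pointer to a named address
// space. Sketch: to_global(p) turns into a call to the runtime function
// __to_global on p cast to the generic address space, and the result is cast
// back to the source-level return type, as below.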
auto Arg0 = EmitScalarExpr(E->getArg(0));
auto NewArgT = llvm::PointerType::get(Int8Ty,
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto NewRetT = llvm::PointerType::get(Int8Ty,
CGM.getContext().getTargetAddressSpace(
E->getType()->getPointeeType().getAddressSpace()));
auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
llvm::Value *NewArg;
if (Arg0->getType()->getPointerAddressSpace() !=
NewArgT->getPointerAddressSpace())
NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
else
NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
auto NewCall =
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
ConvertType(E->getType())));
}
// OpenCL v2.0, s6.13.17 - Enqueue kernel function.
// It contains four different overload formats specified in Table 6.13.17.1.
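// Sketch of the mapping from overload to runtime entry point (per the code
// below; trailing size arguments give the sizes of local-pointer block
// parameters):
//   enqueue_kernel(q, flags, nd, block)               -> __enqueue_kernel_basic
//   enqueue_kernel(q, flags, nd, block, sizes...)     -> __enqueue_kernel_varargs
//   enqueue_kernel(q, flags, nd, nev, wl, ev, block)  -> __enqueue_kernel_basic_events
//   enqueue_kernel(q, flags, nd, nev, wl, ev, block, sizes...)
//                                             -> __enqueue_kernel_events_varargs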
case Builtin::BIenqueue_kernel: {
StringRef Name; // Generated function call name
unsigned NumArgs = E->getNumArgs();
llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
llvm::Value *Range = NDRangeL.getAddress().getPointer();
llvm::Type *RangeTy = NDRangeL.getAddress().getType();
if (NumArgs == 4) {
// The most basic form of the call with parameters:
// queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
Name = "__enqueue_kernel_basic";
llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
GenericVoidPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
AttrBuilder B;
B.addAttribute(Attribute::ByVal);
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
auto RTCall =
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
{Queue, Flags, Range, Kernel, Block});
RTCall->setAttributes(ByValAttrSet);
return RValue::get(RTCall);
}
assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
// Create a temporary array to hold the sizes of local pointer arguments
// for the block. \p First is the position of the first size argument.
auto CreateArrayForSizeVar = [=](unsigned First) {
auto *AT = llvm::ArrayType::get(SizeTy, NumArgs - First);
auto *Arr = Builder.CreateAlloca(AT);
llvm::Value *Ptr;
// Each of the following arguments specifies the size of the corresponding
// argument passed to the enqueued block.
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
for (unsigned I = First; I < NumArgs; ++I) {
auto *Index = llvm::ConstantInt::get(IntTy, I - First);
auto *GEP = Builder.CreateGEP(Arr, {Zero, Index});
if (I == First)
Ptr = GEP;
auto *V =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
Builder.CreateAlignedStore(
V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
}
return Ptr;
};
// Could have events and/or varargs.
if (E->getArg(3)->getType()->isBlockPointerType()) {
// No events passed, but has variadic arguments.
Name = "__enqueue_kernel_varargs";
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
auto *PtrToSizeArray = CreateArrayForSizeVar(4);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
std::vector<llvm::Value *> Args = {
Queue, Flags, Range,
Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
PtrToSizeArray};
std::vector<llvm::Type *> ArgTys = {
QueueTy, IntTy, RangeTy,
GenericVoidPtrTy, GenericVoidPtrTy, IntTy,
PtrToSizeArray->getType()};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
llvm::ArrayRef<llvm::Value *>(Args)));
}
// All remaining call forms have event arguments passed.
if (NumArgs >= 7) {
llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
llvm::Type *EventPtrTy = EventTy->getPointerTo(
CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *NumEvents =
Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
llvm::Value *EventList =
E->getArg(4)->getType()->isArrayType()
? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
: EmitScalarExpr(E->getArg(4));
llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5));
// Convert to generic address space.
EventList = Builder.CreatePointerCast(EventList, EventPtrTy);
ClkEvent = Builder.CreatePointerCast(ClkEvent, EventPtrTy);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
llvm::Value *Kernel =
Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
llvm::Value *Block =
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
std::vector<llvm::Type *> ArgTys = {
QueueTy, Int32Ty, RangeTy, Int32Ty,
EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
std::vector<llvm::Value *> Args = {Queue, Flags, Range, NumEvents,
EventList, ClkEvent, Kernel, Block};
if (NumArgs == 7) {
// Has events but no variadics.
Name = "__enqueue_kernel_basic_events";
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
llvm::ArrayRef<llvm::Value *>(Args)));
}
// Has event info and variadics.
// Pass the number of variadics to the runtime function too.
Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_varargs";
auto *PtrToSizeArray = CreateArrayForSizeVar(7);
Args.push_back(PtrToSizeArray);
ArgTys.push_back(PtrToSizeArray->getType());
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
llvm::ArrayRef<llvm::Value *>(Args)));
}
LLVM_FALLTHROUGH;
}
// OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
// parameter.
case Builtin::BIget_kernel_work_group_size: {
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(Builder.CreateCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
false),
"__get_kernel_work_group_size_impl"),
{Kernel, Arg}));
}
case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
return RValue::get(Builder.CreateCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
false),
"__get_kernel_preferred_work_group_size_multiple_impl"),
{Kernel, Arg}));
}
case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
llvm::Value *NDRange = NDRangeL.getAddress().getPointer();
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
const char *Name =
BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
? "__get_kernel_max_sub_group_size_for_ndrange_impl"
: "__get_kernel_sub_group_count_for_ndrange_impl";
return RValue::get(Builder.CreateCall(
CGM.CreateRuntimeFunction(
llvm::FunctionType::get(
IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
false),
Name),
{NDRange, Kernel, Block}));
}
case Builtin::BI__builtin_store_half:
case Builtin::BI__builtin_store_halff: {
Value *Val = EmitScalarExpr(E->getArg(0));
Address Address = EmitPointerWithAlignment(E->getArg(1));
Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
return RValue::get(Builder.CreateStore(HalfVal, Address));
}
case Builtin::BI__builtin_load_half: {
Address Address = EmitPointerWithAlignment(E->getArg(0));
Value *HalfVal = Builder.CreateLoad(Address);
return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
}
case Builtin::BI__builtin_load_halff: {
Address Address = EmitPointerWithAlignment(E->getArg(0));
Value *HalfVal = Builder.CreateLoad(Address);
return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
}
case Builtin::BIprintf:
if (getTarget().getTriple().isNVPTX())
return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
break;
case Builtin::BI__builtin_canonicalize:
case Builtin::BI__builtin_canonicalizef:
case Builtin::BI__builtin_canonicalizel:
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
case Builtin::BI__builtin_thread_pointer: {
if (!getContext().getTargetInfo().isTLSSupported())
CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
// Fall through - it's already mapped to the intrinsic by GCCBuiltin.
break;
}
case Builtin::BI__builtin_os_log_format:
return emitBuiltinOSLogFormat(*E);
case Builtin::BI__builtin_os_log_format_buffer_size: {
analyze_os_log::OSLogBufferLayout Layout;
analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout);
return RValue::get(ConstantInt::get(ConvertType(E->getType()),
Layout.size().getQuantity()));
}
case Builtin::BI__xray_customevent: {
if (!ShouldXRayInstrumentFunction())
return RValue::getIgnored();
if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
XRayInstrKind::Custom))
return RValue::getIgnored();
if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
return RValue::getIgnored();
Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
auto FTy = F->getFunctionType();
auto Arg0 = E->getArg(0);
auto Arg0Val = EmitScalarExpr(Arg0);
auto Arg0Ty = Arg0->getType();
auto PTy0 = FTy->getParamType(0);
if (PTy0 != Arg0Val->getType()) {
if (Arg0Ty->isArrayType())
Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
else
Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
}
auto Arg1 = EmitScalarExpr(E->getArg(1));
auto PTy1 = FTy->getParamType(1);
if (PTy1 != Arg1->getType())
Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
}
case Builtin::BI__xray_typedevent: {
// TODO: There should be a way to always emit events even if the current
// function is not instrumented. Losing events in a stream can cripple
// a trace.
if (!ShouldXRayInstrumentFunction())
return RValue::getIgnored();
if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
XRayInstrKind::Typed))
return RValue::getIgnored();
if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
return RValue::getIgnored();
Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
auto FTy = F->getFunctionType();
auto Arg0 = EmitScalarExpr(E->getArg(0));
auto PTy0 = FTy->getParamType(0);
if (PTy0 != Arg0->getType())
Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
auto Arg1 = E->getArg(1);
auto Arg1Val = EmitScalarExpr(Arg1);
auto Arg1Ty = Arg1->getType();
auto PTy1 = FTy->getParamType(1);
if (PTy1 != Arg1Val->getType()) {
if (Arg1Ty->isArrayType())
Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
else
Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
}
auto Arg2 = EmitScalarExpr(E->getArg(2));
auto PTy2 = FTy->getParamType(2);
if (PTy2 != Arg2->getType())
Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
}
case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_ms_va_end:
return RValue::get(
EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
BuiltinID == Builtin::BI__builtin_ms_va_start));
case Builtin::BI__builtin_ms_va_copy: {
// Lower this manually. We can't reliably determine whether or not any
// given va_copy() is for a Win64 va_list from the calling convention
// alone, because it's legal to do this from a System V ABI function.
// With opaque pointer types, we won't have enough information in LLVM
// IR to determine this from the argument types, either. Best to do it
// now, while we have enough information.
Address DestAddr = EmitMSVAListRef(E->getArg(0));
Address SrcAddr = EmitMSVAListRef(E->getArg(1));
llvm::Type *BPP = Int8PtrPtrTy;
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
DestAddr.getAlignment());
SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
SrcAddr.getAlignment());
Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
}
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
// the call using the normal call path, but using the unmangled
// version of the function name.
if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E,
CGM.getBuiltinLibFunction(FD, BuiltinID));
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return emitLibraryCall(*this, FD, E,
cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
// Check that a call to a target specific builtin has the correct target
// features.
// This check is down here so that it is skipped for non-target-specific
// builtins; however, if generic builtins ever start to require target
// features, it can move up to the beginning of the function.
checkTargetFeatures(E, FD);
if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
StringRef Prefix =
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
if (!Prefix.empty()) {
IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
// NOTE: we don't need to perform a compatibility flag check here, since the
// intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters
// the MS builtins via ALL_MS_LANGUAGES, so they are filtered out earlier.
if (IntrinsicID == Intrinsic::not_intrinsic)
IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
}
if (IntrinsicID != Intrinsic::not_intrinsic) {
SmallVector<Value*, 16> Args;
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
Function *F = CGM.getIntrinsic(IntrinsicID);
llvm::FunctionType *FTy = F->getFunctionType();
for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
Value *ArgValue;
// If this is a normal argument, just emit it as a scalar.
if ((ICEArguments & (1 << i)) == 0) {
ArgValue = EmitScalarExpr(E->getArg(i));
} else {
// If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
assert(IsConst && "Constant arg isn't actually constant?");
(void)IsConst;
ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
}
// If the intrinsic arg type is different from the builtin arg type
// we need to do a bit cast.
llvm::Type *PTy = FTy->getParamType(i);
if (PTy != ArgValue->getType()) {
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
ArgValue = Builder.CreateBitCast(ArgValue, PTy);
}
Args.push_back(ArgValue);
}
Value *V = Builder.CreateCall(F, Args);
QualType BuiltinRetType = E->getType();
llvm::Type *RetTy = VoidTy;
if (!BuiltinRetType->isVoidType())
RetTy = ConvertType(BuiltinRetType);
if (RetTy != V->getType()) {
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
V = Builder.CreateBitCast(V, RetTy);
}
return RValue::get(V);
}
// See if we have a target specific builtin that needs to be lowered.
if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
return RValue::get(V);
ErrorUnsupported(E, "builtin function");
// Unknown builtin, for now just dump it out and return undef.
return GetUndefRValue(E->getType());
}
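// Dispatch a target-specific builtin to the per-architecture emitter;
// architectures without builtin support fall through to the nullptr default.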
static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
unsigned BuiltinID, const CallExpr *E,
llvm::Triple::ArchType Arch) {
switch (Arch) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
return CGF->EmitX86BuiltinExpr(BuiltinID, E);
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
case llvm::Triple::r600:
case llvm::Triple::amdgcn:
return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
case llvm::Triple::systemz:
return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
case llvm::Triple::hexagon:
return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
default:
return nullptr;
}
}
Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
assert(getContext().getAuxTargetInfo() && "Missing aux target info");
return EmitTargetArchBuiltinExpr(
this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
getContext().getAuxTargetInfo()->getTriple().getArch());
}
return EmitTargetArchBuiltinExpr(this, BuiltinID, E,
getTarget().getTriple().getArch());
}
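// Map a NeonTypeFlags descriptor onto the corresponding LLVM vector type.
// For example (per the switch below), Int32 yields <2 x i32> for a 64-bit
// "D" register and <4 x i32> when the quad ("Q" register) flag is set;
// V1Ty forces a single-element vector instead.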
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
NeonTypeFlags TypeFlags,
bool HasLegalHalfType=true,
bool V1Ty=false) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Float16:
if (HasLegalHalfType)
return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
else
return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
case NeonTypeFlags::Poly64:
return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
case NeonTypeFlags::Poly128:
// FIXME: i128 and f128 don't get full support in Clang and LLVM;
// much of the i128 and f128 API is still missing,
// so we use v16i8 to represent poly128 and rely on pattern matching.
return llvm::VectorType::get(CGF->Int8Ty, 16);
case NeonTypeFlags::Float32:
return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Float64:
return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
}
llvm_unreachable("Unknown vector element type!");
}
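// Map an integer NEON element type to the floating-point vector type of the
// same overall width, e.g. Int32 -> <2 x float> (<4 x float> for Q forms).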
static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
NeonTypeFlags IntTypeFlags) {
int IsQuad = IntTypeFlags.isQuad();
switch (IntTypeFlags.getEltType()) {
case NeonTypeFlags::Int16:
return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
case NeonTypeFlags::Int32:
return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
case NeonTypeFlags::Int64:
return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
default:
llvm_unreachable("Type can't be converted to floating-point!");
}
}
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
unsigned nElts = V->getType()->getVectorNumElements();
Value* SV = llvm::ConstantVector::getSplat(nElts, C);
return Builder.CreateShuffleVector(V, V, SV, "lane");
}
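// Emit a call to a NEON intrinsic, bitcasting each operand to the parameter
// type the intrinsic expects. The operand at position 'shift' (when nonzero)
// is instead materialized as a constant shift-amount vector, negated for
// right shifts.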
Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
const char *name,
unsigned shift, bool rightshift) {
unsigned j = 0;
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j)
if (shift > 0 && shift == j)
Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
else
Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
return Builder.CreateCall(F, Ops, name);
}
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
bool neg) {
int SV = cast<ConstantInt>(V)->getSExtValue();
return ConstantInt::get(Ty, neg ? -SV : SV);
}
// Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
llvm::Type *Ty, bool usgn,
const char *name) {
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
int EltSize = VTy->getScalarSizeInBits();
Vec = Builder.CreateBitCast(Vec, Ty);
// lshr/ashr are undefined when the shift amount is equal to the vector
// element size.
if (ShiftAmt == EltSize) {
if (usgn) {
// Right-shifting an unsigned value by its size yields 0.
return llvm::ConstantAggregateZero::get(VTy);
} else {
// Right-shifting a signed value by its size is equivalent
// to a shift of size-1.
--ShiftAmt;
Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
}
}
Shift = EmitNeonShiftVector(Shift, Ty, false);
if (usgn)
return Builder.CreateLShr(Vec, Shift, name);
else
return Builder.CreateAShr(Vec, Shift, name);
}
enum {
AddRetType = (1 << 0),
Add1ArgType = (1 << 1),
Add2ArgTypes = (1 << 2),
VectorizeRetType = (1 << 3),
VectorizeArgTypes = (1 << 4),
InventFloatType = (1 << 5),
UnsignedAlts = (1 << 6),
Use64BitVectors = (1 << 7),
Use128BitVectors = (1 << 8),
Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
VectorRet = AddRetType | VectorizeRetType,
VectorRetGetArgs01 =
AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
FpCmpzModifiers =
AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};
namespace {
struct NeonIntrinsicInfo {
const char *NameHint;
unsigned BuiltinID;
unsigned LLVMIntrinsic;
unsigned AltLLVMIntrinsic;
unsigned TypeModifier;
bool operator<(unsigned RHSBuiltinID) const {
return BuiltinID < RHSBuiltinID;
}
bool operator<(const NeonIntrinsicInfo &TE) const {
return BuiltinID < TE.BuiltinID;
}
};
} // end anonymous namespace
#define NEONMAP0(NameBase) \
{ #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
{ #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
Intrinsic::LLVMIntrinsic, 0, TypeModifier }
#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
{ #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
TypeModifier }
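// For instance, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to the entry
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// i.e. a mapping with no alternate intrinsic and no type-modifier flags.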
static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP1(vabs_v, arm_neon_vabs, 0),
NEONMAP1(vabsq_v, arm_neon_vabs, 0),
NEONMAP0(vaddhn_v),
NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
NEONMAP1(vaeseq_v, arm_neon_aese, 0),
NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
NEONMAP1(vcage_v, arm_neon_vacge, 0),
NEONMAP1(vcageq_v, arm_neon_vacge, 0),
NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
NEONMAP1(vcale_v, arm_neon_vacge, 0),
NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
NEONMAP0(vceqz_v),
NEONMAP0(vceqzq_v),
NEONMAP0(vcgez_v),
NEONMAP0(vcgezq_v),
NEONMAP0(vcgtz_v),
NEONMAP0(vcgtzq_v),
NEONMAP0(vclez_v),
NEONMAP0(vclezq_v),
NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
NEONMAP0(vcltz_v),
NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
NEONMAP0(vcvt_f16_v),
NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvt_s16_v),
NEONMAP0(vcvt_s32_v),
NEONMAP0(vcvt_s64_v),
NEONMAP0(vcvt_u16_v),
NEONMAP0(vcvt_u32_v),
NEONMAP0(vcvt_u64_v),
NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_s16_v),
NEONMAP0(vcvtq_s32_v),
NEONMAP0(vcvtq_s64_v),
NEONMAP0(vcvtq_u16_v),
NEONMAP0(vcvtq_u32_v),
NEONMAP0(vcvtq_u64_v),
NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
NEONMAP0(vld1_dup_v),
NEONMAP1(vld1_v, arm_neon_vld1, 0),
NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
NEONMAP0(vld1q_dup_v),
NEONMAP1(vld1q_v, arm_neon_vld1, 0),
NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2_v, arm_neon_vld2, 0),
NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
NEONMAP1(vld2q_v, arm_neon_vld2, 0),
NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3_v, arm_neon_vld3, 0),
NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
NEONMAP1(vld3q_v, arm_neon_vld3, 0),
NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4_v, arm_neon_vld4, 0),
NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
NEONMAP0(vmull_v),
NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
NEONMAP0(vrndi_v),
NEONMAP0(vrndiq_v),
NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
NEONMAP0(vshlq_n_v),
NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
NEONMAP1(vst1_v, arm_neon_vst1, 0),
NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst1q_v, arm_neon_vst1, 0),
NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2_v, arm_neon_vst2, 0),
NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
NEONMAP1(vst2q_v, arm_neon_vst2, 0),
NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3_v, arm_neon_vst3, 0),
NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
NEONMAP1(vst3q_v, arm_neon_vst3, 0),
NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4_v, arm_neon_vst4, 0),
NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
NEONMAP1(vst4q_v, arm_neon_vst4, 0),
NEONMAP0(vsubhn_v),
NEONMAP0(vtrn_v),
NEONMAP0(vtrnq_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
NEONMAP0(vuzp_v),
NEONMAP0(vuzpq_v),
NEONMAP0(vzip_v),
NEONMAP0(vzipq_v)
};
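// AArch64-only SIMD mappings. Like the ARM map above, entries must stay
// sorted (by BuiltinID, which follows the alphabetical builtin-name order)
// so that findNeonIntrinsicInMap below can binary-search them.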
static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
NEONMAP0(vaddhn_v),
NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
NEONMAP1(vcage_v, aarch64_neon_facge, 0),
NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
NEONMAP1(vcale_v, aarch64_neon_facge, 0),
NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
NEONMAP0(vceqz_v),
NEONMAP0(vceqzq_v),
NEONMAP0(vcgez_v),
NEONMAP0(vcgezq_v),
NEONMAP0(vcgtz_v),
NEONMAP0(vcgtzq_v),
NEONMAP0(vclez_v),
NEONMAP0(vclezq_v),
NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
NEONMAP0(vcltz_v),
NEONMAP0(vcltzq_v),
NEONMAP1(vclz_v, ctlz, Add1ArgType),
NEONMAP1(vclzq_v, ctlz, Add1ArgType),
NEONMAP1(vcnt_v, ctpop, Add1ArgType),
NEONMAP1(vcntq_v, ctpop, Add1ArgType),
NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
NEONMAP0(vcvt_f16_v),
NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
NEONMAP0(vcvt_f32_v),
NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP0(vcvtq_f16_v),
NEONMAP0(vcvtq_f32_v),
NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
NEONMAP0(vfmaq_v),
NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP0(vrndi_v),
NEONMAP0(vrndiq_v),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
NEONMAP0(vshlq_n_v),
NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
NEONMAP0(vsubhn_v),
NEONMAP0(vtst_v),
NEONMAP0(vtstq_v),
};
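// Scalar (SISD) AArch64 mappings, kept sorted for the same binary-search
// lookup as the SIMD maps.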
static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
// FP16 scalar intrinsics go here.
NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
};
#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2
static bool NEONSIMDIntrinsicsProvenSorted = false;
static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;
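// Look up BuiltinID in a map sorted by BuiltinID. On the first query of each
// map (in asserts builds only) we verify that the table really is sorted,
// since std::lower_bound silently misbehaves on unsorted input.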
static const NeonIntrinsicInfo *
findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
unsigned BuiltinID, bool &MapProvenSorted) {
#ifndef NDEBUG
if (!MapProvenSorted) {
assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
MapProvenSorted = true;
}
#endif
const NeonIntrinsicInfo *Builtin =
std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
return Builtin;
return nullptr;
}
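// Build the overload type list for an LLVM NEON intrinsic from the table's
// TypeModifier flags: optionally the call's return type, then one or two
// copies of ArgType (re-vectorized to 64 or 128 bits when requested), and
// finally an invented float type.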
Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
unsigned Modifier,
llvm::Type *ArgType,
const CallExpr *E) {
int VectorSize = 0;
if (Modifier & Use64BitVectors)
VectorSize = 64;
else if (Modifier & Use128BitVectors)
VectorSize = 128;
// Return type.
SmallVector<llvm::Type *, 3> Tys;
if (Modifier & AddRetType) {
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
if (Modifier & VectorizeRetType)
Ty = llvm::VectorType::get(
Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
Tys.push_back(Ty);
}
// Arguments.
if (Modifier & VectorizeArgTypes) {
int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
ArgType = llvm::VectorType::get(ArgType, Elts);
}
if (Modifier & (Add1ArgType | Add2ArgTypes))
Tys.push_back(ArgType);
if (Modifier & Add2ArgTypes)
Tys.push_back(ArgType);
if (Modifier & InventFloatType)
Tys.push_back(FloatTy);
return CGM.getIntrinsic(IntrinsicID, Tys);
}
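// Emit a scalar (SISD) NEON builtin through the table-driven path: scalar
// operands that the chosen intrinsic expects as vectors are inserted into
// lane 0 of an undef vector, and a vector result wider than the builtin's
// return type is narrowed by extracting lane 0.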
static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
const NeonIntrinsicInfo &SISDInfo,
SmallVectorImpl<Value *> &Ops,
const CallExpr *E) {
unsigned BuiltinID = SISDInfo.BuiltinID;
unsigned Int = SISDInfo.LLVMIntrinsic;
unsigned Modifier = SISDInfo.TypeModifier;
const char *s = SISDInfo.NameHint;
switch (BuiltinID) {
case NEON::BI__builtin_neon_vcled_s64:
case NEON::BI__builtin_neon_vcled_u64:
case NEON::BI__builtin_neon_vcles_f32:
case NEON::BI__builtin_neon_vcled_f64:
case NEON::BI__builtin_neon_vcltd_s64:
case NEON::BI__builtin_neon_vcltd_u64:
case NEON::BI__builtin_neon_vclts_f32:
case NEON::BI__builtin_neon_vcltd_f64:
case NEON::BI__builtin_neon_vcales_f32:
case NEON::BI__builtin_neon_vcaled_f64:
case NEON::BI__builtin_neon_vcalts_f32:
case NEON::BI__builtin_neon_vcaltd_f64:
// Only one direction of comparisons actually exists: cmle is actually a
// cmge with swapped operands. The table gives us the right intrinsic, but
// we still need to do the swap.
std::swap(Ops[0], Ops[1]);
break;
}
assert(Int && "Generic code assumes a valid intrinsic");
// Determine the type(s) of this overloaded AArch64 intrinsic.
const Expr *Arg = E->getArg(0);
llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
int j = 0;
ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
ai != ae; ++ai, ++j) {
llvm::Type *ArgTy = ai->getType();
if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
ArgTy->getPrimitiveSizeInBits())
continue;
assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
// The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
// it before inserting.
Ops[j] =
CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
Ops[j] =
CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
}
Value *Result = CGF.EmitNeonCall(F, Ops, s);
llvm::Type *ResultType = CGF.ConvertType(E->getType());
if (ResultType->getPrimitiveSizeInBits() <
Result->getType()->getPrimitiveSizeInBits())
return CGF.Builder.CreateExtractElement(Result, C0);
return CGF.Builder.CreateBitCast(Result, ResultType, s);
}
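// Shared code path for NEON builtins whose handling is identical on ARM and
// AArch64. The last argument of every overloaded NEON builtin is an integer
// constant encoding NeonTypeFlags, which selects the concrete vector type;
// builtins with no special case below fall through to a generic
// table-driven intrinsic call at the end.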
Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
const char *NameHint, unsigned Modifier, const CallExpr *E,
SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
llvm::Triple::ArchType Arch) {
// Get the last argument, which specifies the vector type.
llvm::APSInt NeonTypeConst;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(NeonTypeConst.getZExtValue());
bool Usgn = Type.isUnsigned();
bool Quad = Type.isQuad();
const bool HasLegalHalfType = getTarget().hasLegalHalfType();
llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
auto getAlignmentValue32 = [&](Address addr) -> Value* {
return Builder.getInt32(addr.getAlignment().getQuantity());
};
unsigned Int = LLVMIntrinsic;
if ((Modifier & UnsignedAlts) && !Usgn)
Int = AltLLVMIntrinsic;
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vabs_v:
case NEON::BI__builtin_neon_vabsq_v:
if (VTy->getElementType()->isFloatingPointTy())
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
case NEON::BI__builtin_neon_vaddhn_v: {
llvm::VectorType *SrcTy =
llvm::VectorType::getExtendedElementVectorType(VTy);
// %sum = add <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
// %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
Constant *ShiftAmt =
ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
// %res = trunc <4 x i32> %high to <4 x i16>
return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
}
case NEON::BI__builtin_neon_vcale_v:
case NEON::BI__builtin_neon_vcaleq_v:
case NEON::BI__builtin_neon_vcalt_v:
case NEON::BI__builtin_neon_vcaltq_v:
std::swap(Ops[0], Ops[1]);
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcage_v:
case NEON::BI__builtin_neon_vcageq_v:
case NEON::BI__builtin_neon_vcagt_v:
case NEON::BI__builtin_neon_vcagtq_v: {
llvm::Type *Ty;
switch (VTy->getScalarSizeInBits()) {
default: llvm_unreachable("unexpected type");
case 32:
Ty = FloatTy;
break;
case 64:
Ty = DoubleTy;
break;
case 16:
Ty = HalfTy;
break;
}
llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
llvm::Type *Tys[] = { VTy, VecFlt };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, NameHint);
}
case NEON::BI__builtin_neon_vceqz_v:
case NEON::BI__builtin_neon_vceqzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
ICmpInst::ICMP_EQ, "vceqz");
case NEON::BI__builtin_neon_vcgez_v:
case NEON::BI__builtin_neon_vcgezq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
ICmpInst::ICMP_SGE, "vcgez");
case NEON::BI__builtin_neon_vclez_v:
case NEON::BI__builtin_neon_vclezq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
ICmpInst::ICMP_SLE, "vclez");
case NEON::BI__builtin_neon_vcgtz_v:
case NEON::BI__builtin_neon_vcgtzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
ICmpInst::ICMP_SGT, "vcgtz");
case NEON::BI__builtin_neon_vcltz_v:
case NEON::BI__builtin_neon_vcltzq_v:
return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vclz_v:
case NEON::BI__builtin_neon_vclzq_v:
// We generate a target-independent intrinsic, which needs a second argument
// saying whether clz of zero is undefined; on ARM it isn't.
Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
break;
case NEON::BI__builtin_neon_vcvt_f32_v:
case NEON::BI__builtin_neon_vcvtq_f32_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f16_v:
case NEON::BI__builtin_neon_vcvtq_f16_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
HasLegalHalfType);
return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_n_f16_v:
case NEON::BI__builtin_neon_vcvt_n_f32_v:
case NEON::BI__builtin_neon_vcvt_n_f64_v:
case NEON::BI__builtin_neon_vcvtq_n_f16_v:
case NEON::BI__builtin_neon_vcvtq_n_f32_v:
case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
Function *F = CGM.getIntrinsic(Int, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case NEON::BI__builtin_neon_vcvt_n_s16_v:
case NEON::BI__builtin_neon_vcvt_n_s32_v:
case NEON::BI__builtin_neon_vcvt_n_u16_v:
case NEON::BI__builtin_neon_vcvt_n_u32_v:
case NEON::BI__builtin_neon_vcvt_n_s64_v:
case NEON::BI__builtin_neon_vcvt_n_u64_v:
case NEON::BI__builtin_neon_vcvtq_n_s16_v:
case NEON::BI__builtin_neon_vcvtq_n_s32_v:
case NEON::BI__builtin_neon_vcvtq_n_u16_v:
case NEON::BI__builtin_neon_vcvtq_n_u32_v:
case NEON::BI__builtin_neon_vcvtq_n_s64_v:
case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
return EmitNeonCall(F, Ops, "vcvt_n");
}
case NEON::BI__builtin_neon_vcvt_s32_v:
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
case NEON::BI__builtin_neon_vcvt_s16_v:
case NEON::BI__builtin_neon_vcvt_u16_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
case NEON::BI__builtin_neon_vcvtq_s16_v:
case NEON::BI__builtin_neon_vcvtq_u16_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
: Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvta_u16_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_s16_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvtaq_u16_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v:
case NEON::BI__builtin_neon_vcvtn_s16_v:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtn_u16_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_s16_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtnq_u16_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v:
case NEON::BI__builtin_neon_vcvtp_s16_v:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtp_u16_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_s16_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtpq_u16_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v:
case NEON::BI__builtin_neon_vcvtm_s16_v:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtm_u16_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_s16_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtmq_u16_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vext_v:
case NEON::BI__builtin_neon_vextq_v: {
int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
SmallVector<uint32_t, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(i+CV);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
}
case NEON::BI__builtin_neon_vfma_v:
case NEON::BI__builtin_neon_vfmaq_v: {
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
// The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
}
case NEON::BI__builtin_neon_vld1_x2_v:
case NEON::BI__builtin_neon_vld1q_x2_v:
case NEON::BI__builtin_neon_vld1_x3_v:
case NEON::BI__builtin_neon_vld1q_x3_v:
case NEON::BI__builtin_neon_vld1_x4_v:
case NEON::BI__builtin_neon_vld1q_x4_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v:
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
Value *Align = getAlignmentValue32(PtrOp1);
Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
LoadInst *Ld = Builder.CreateLoad(PtrOp0);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
return EmitNeonSplat(Ops[0], CI);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v:
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v:
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
for (unsigned I = 2; I < Ops.size() - 1; ++I)
Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
Ops.push_back(getAlignmentValue32(PtrOp1));
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vmovl_v: {
llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
if (Usgn)
return Builder.CreateZExt(Ops[0], Ty, "vmovl");
return Builder.CreateSExt(Ops[0], Ty, "vmovl");
}
case NEON::BI__builtin_neon_vmovn_v: {
llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
}
case NEON::BI__builtin_neon_vmull_v:
// FIXME: the integer vmull operations could be emitted in terms of pure
// LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
// hoisting the exts outside loops. Until global ISel comes along that can
// see through such movement, this leads to bad CodeGen. So we need an
// intrinsic for now.
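// For reference, the pure-IR form for a signed 4x16-bit vmull would be
// roughly:
//   %a = sext <4 x i16> %lhs to <4 x i32>
//   %b = sext <4 x i16> %rhs to <4 x i32>
//   %r = mul <4 x i32> %a, %b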
Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy =
llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
}
case NEON::BI__builtin_neon_vpaddl_v:
case NEON::BI__builtin_neon_vpaddlq_v: {
// The source operand type has twice as many elements of half the size.
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
llvm::Type *NarrowTy =
llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
llvm::Type *Tys[2] = { Ty, NarrowTy };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
}
case NEON::BI__builtin_neon_vqdmlal_v:
case NEON::BI__builtin_neon_vqdmlsl_v: {
SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
Ops.resize(2);
return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
}
case NEON::BI__builtin_neon_vqshl_n_v:
case NEON::BI__builtin_neon_vqshlq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
1, false);
case NEON::BI__builtin_neon_vqshlu_n_v:
case NEON::BI__builtin_neon_vqshluq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
1, false);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
case NEON::BI__builtin_neon_vrsqrte_v:
case NEON::BI__builtin_neon_vrsqrteq_v:
Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrndi_v:
case NEON::BI__builtin_neon_vrndiq_v:
Int = Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
case NEON::BI__builtin_neon_vrshr_n_v:
case NEON::BI__builtin_neon_vrshrq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
1, true);
case NEON::BI__builtin_neon_vshl_n_v:
case NEON::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
"vshl_n");
case NEON::BI__builtin_neon_vshll_n_v: {
llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
if (Usgn)
Ops[0] = Builder.CreateZExt(Ops[0], VTy);
else
Ops[0] = Builder.CreateSExt(Ops[0], VTy);
Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
}
case NEON::BI__builtin_neon_vshrn_n_v: {
llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
if (Usgn)
Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
else
Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
}
case NEON::BI__builtin_neon_vshr_n_v:
case NEON::BI__builtin_neon_vshrq_n_v:
return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v:
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v:
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v:
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v:
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v: {
llvm::Type *Tys[] = {Int8PtrTy, Ty};
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vst1_x2_v:
case NEON::BI__builtin_neon_vst1q_x2_v:
case NEON::BI__builtin_neon_vst1_x3_v:
case NEON::BI__builtin_neon_vst1q_x3_v:
case NEON::BI__builtin_neon_vst1_x4_v:
case NEON::BI__builtin_neon_vst1q_x4_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
// TODO: Currently in AArch32 mode the pointer operand comes first, whereas
// in AArch64 it comes last. We may want to stick to one or the other.
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) {
llvm::Type *Tys[2] = { VTy, PTy };
std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
llvm::Type *Tys[2] = { PTy, VTy };
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vsubhn_v: {
llvm::VectorType *SrcTy =
llvm::VectorType::getExtendedElementVectorType(VTy);
// %diff = sub <4 x i32> %lhs, %rhs
Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
// %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
Constant *ShiftAmt =
ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
// %res = trunc <4 x i32> %high to <4 x i16>
return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
}
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
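// Each iteration emits one transposed half; for e == 4, vi == 0 selects
// indices (0, 4, 2, 6) and vi == 1 selects (1, 5, 3, 7).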
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<uint32_t, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vtst_v:
case NEON::BI__builtin_neon_vtstq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
ConstantAggregateZero::get(Ty));
return Builder.CreateSExt(Ops[0], Ty, "vtst");
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
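// For e == 4, vi == 0 gathers the even lanes (0, 2, 4, 6) and vi == 1 the
// odd lanes (1, 3, 5, 7) of the concatenated input pair.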
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<uint32_t, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
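// For e == 4, vi == 0 interleaves the low halves, giving indices
// (0, 4, 1, 5), and vi == 1 the high halves, giving (2, 6, 3, 7).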
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<uint32_t, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vdot_v:
case NEON::BI__builtin_neon_vdotq_v: {
llvm::Type *InputTy =
llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
llvm::Type *Tys[2] = { Ty, InputTy };
Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
}
}
assert(Int && "Expected valid intrinsic number");
// Determine the type(s) of this overloaded NEON intrinsic.
Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
Value *Result = EmitNeonCall(F, Ops, NameHint);
llvm::Type *ResultType = ConvertType(E->getType());
// Cast the AArch64 intrinsic's one-element vector result to the scalar
// type expected by the builtin.
return Builder.CreateBitCast(Result, ResultType, NameHint);
}
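// Emit a compare-against-zero builtin (vceqz and friends) as an fcmp or
// icmp against a zero constant, sign-extended to an all-ones/all-zeros
// lane mask.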
Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
const CmpInst::Predicate Ip, const Twine &Name) {
llvm::Type *OTy = Op->getType();
// FIXME: this is utterly horrific. We should not be looking at previous
// codegen context to find out what needs doing. Unfortunately TableGen
// currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
// (etc).
if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
OTy = BI->getOperand(0)->getType();
Op = Builder.CreateBitCast(Op, OTy);
if (OTy->getScalarType()->isFloatingPointTy()) {
Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
} else {
Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
}
return Builder.CreateSExt(Op, Ty, Name);
}
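// Pack a list of 64-bit table operands into the 128-bit vectors expected by
// the AArch64 TBL/TBX intrinsics: adjacent pairs are concatenated with a
// shufflevector, and a trailing unpaired table is padded out with zeros.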
static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Value *ExtOp, Value *IndexOp,
llvm::Type *ResTy, unsigned IntID,
const char *Name) {
SmallVector<Value *, 2> TblOps;
if (ExtOp)
TblOps.push_back(ExtOp);
// Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
SmallVector<uint32_t, 16> Indices;
llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
Indices.push_back(2*i);
Indices.push_back(2*i+1);
}
int PairPos = 0, End = Ops.size() - 1;
while (PairPos < End) {
TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
Ops[PairPos+1], Indices,
Name));
PairPos += 2;
}
// If there's an odd number of 64-bit lookup tables, fill the high 64 bits
// of the last 128-bit lookup table with zero.
if (PairPos == End) {
Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
ZeroTbl, Indices, Name));
}
Function *TblF;
TblOps.push_back(IndexOp);
TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
return CGF.EmitNeonCall(TblF, TblOps, Name);
}
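// Map the ARM hint builtins (nop, yield, wfe, wfi, sev, sevl) to the
// immediate operand of the llvm.arm.hint intrinsic; returns null for
// anything else.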
Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
unsigned Value;
switch (BuiltinID) {
default:
return nullptr;
case ARM::BI__builtin_arm_nop:
Value = 0;
break;
case ARM::BI__builtin_arm_yield:
case ARM::BI__yield:
Value = 1;
break;
case ARM::BI__builtin_arm_wfe:
case ARM::BI__wfe:
Value = 2;
break;
case ARM::BI__builtin_arm_wfi:
case ARM::BI__wfi:
Value = 3;
break;
case ARM::BI__builtin_arm_sev:
case ARM::BI__sev:
Value = 4;
break;
case ARM::BI__builtin_arm_sevl:
case ARM::BI__sevl:
Value = 5;
break;
}
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
llvm::ConstantInt::get(Int32Ty, Value));
}
// Generates the IR for the read/write special register builtin.
// ValueType is the type of the value that is to be written or read,
// RegisterType is the type of the register being written to or read from.
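// For example, on AArch64 __builtin_arm_rsr64("cntvct_el0") becomes a call
// to llvm.read_register.i64 with the metadata node !{!"cntvct_el0"} as its
// only argument.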
static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
const CallExpr *E,
llvm::Type *RegisterType,
llvm::Type *ValueType,
bool IsRead,
StringRef SysReg = "") {
// The read and write register intrinsics only support 32- and 64-bit
// operations.
assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
&& "Unsupported size for register.");
CodeGen::CGBuilderTy &Builder = CGF.Builder;
CodeGen::CodeGenModule &CGM = CGF.CGM;
LLVMContext &Context = CGM.getLLVMContext();
if (SysReg.empty()) {
const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
}
llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
llvm::Type *Types[] = { RegisterType };
bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
&& "Can't fit 64-bit value in 32-bit register");
if (IsRead) {
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
llvm::Value *Call = Builder.CreateCall(F, Metadata);
if (MixedTypes)
// Read into 64 bit register and then truncate result to 32 bit.
return Builder.CreateTrunc(Call, ValueType);
if (ValueType->isPointerTy())
// Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
return Builder.CreateIntToPtr(Call, ValueType);
return Call;
}
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
if (MixedTypes) {
// Extend 32 bit write value to 64 bit to pass to write.
ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
return Builder.CreateCall(F, { Metadata, ArgValue });
}
if (ValueType->isPointerTy()) {
// Have VoidPtrTy ArgValue but want to return an i32/i64.
ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
return Builder.CreateCall(F, { Metadata, ArgValue });
}
return Builder.CreateCall(F, { Metadata, ArgValue });
}
/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
/// argument that specifies the vector type.
static bool HasExtraNeonArgument(unsigned BuiltinID) {
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
case NEON::BI__builtin_neon_vsha1h_u32:
case NEON::BI__builtin_neon_vsha1cq_u32:
case NEON::BI__builtin_neon_vsha1pq_u32:
case NEON::BI__builtin_neon_vsha1mq_u32:
case clang::ARM::BI_MoveToCoprocessor:
case clang::ARM::BI_MoveToCoprocessor2:
return false;
}
return true;
}
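// MSVC's __iso_volatile_* builtins request a volatile access with plain
// ISO C++ semantics (i.e. without the extra ordering that /volatile:ms
// attaches to volatile accesses). They are lowered here to ordinary
// volatile integer loads and stores of the access size.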
Value *CodeGenFunction::EmitISOVolatileLoad(const CallExpr *E) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
LoadSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::LoadInst *Load =
Builder.CreateAlignedLoad(Ptr, LoadSize);
Load->setVolatile(true);
return Load;
}
Value *CodeGenFunction::EmitISOVolatileStore(const CallExpr *E) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Value = EmitScalarExpr(E->getArg(1));
QualType ElTy = E->getArg(0)->getType()->getPointeeType();
CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
StoreSize.getQuantity() * 8);
Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
llvm::StoreInst *Store =
Builder.CreateAlignedStore(Value, Ptr,
StoreSize);
Store->setVolatile(true);
return Store;
}
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
if (auto Hint = GetValueForARMHint(BuiltinID))
return Hint;
if (BuiltinID == ARM::BI__emit) {
bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
APSInt Value;
if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
llvm::InlineAsm *Emit =
IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
/*SideEffects=*/true)
: InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
/*SideEffects=*/true);
return Builder.CreateCall(Emit);
}
if (BuiltinID == ARM::BI__builtin_arm_dbg) {
Value *Option = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
}
if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *IsData = EmitScalarExpr(E->getArg(2));
// Locality is not supported on the ARM target.
Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
if (BuiltinID == ARM::BI__builtin_arm_rbit) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == ARM::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
for (unsigned i = 0; i < 2; i++)
Ops[i] = EmitScalarExpr(E->getArg(i));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
BuiltinID == ARM::BI__builtin_arm_mcrr2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
case ARM::BI__builtin_arm_mcrr:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
break;
case ARM::BI__builtin_arm_mcrr2:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
break;
}
// The MCRR{2} instruction has 5 operands, but the intrinsic has only 4:
// Rt and Rt2 are represented as a single unsigned 64-bit integer in the
// intrinsic definition, though internally they are two 32-bit integers.
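// E.g. RtAndRt2 = 0x1122334455667788 is split into Rt = 0x55667788 (low
// word) and Rt2 = 0x11223344 (high word) below.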
Value *Coproc = EmitScalarExpr(E->getArg(0));
Value *Opc1 = EmitScalarExpr(E->getArg(1));
Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
Value *CRm = EmitScalarExpr(E->getArg(3));
Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
}
if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
BuiltinID == ARM::BI__builtin_arm_mrrc2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
case ARM::BI__builtin_arm_mrrc:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
break;
case ARM::BI__builtin_arm_mrrc2:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
break;
}
Value *Coproc = EmitScalarExpr(E->getArg(0));
Value *Opc1 = EmitScalarExpr(E->getArg(1));
Value *CRm = EmitScalarExpr(E->getArg(2));
Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
// Returns an unsigned 64-bit integer, represented
// as two 32-bit integers.
Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
Rt = Builder.CreateZExt(Rt, Int64Ty);
Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
}
if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
((BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 64) ||
BuiltinID == ARM::BI__ldrexd) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
case ARM::BI__builtin_arm_ldaex:
F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
break;
case ARM::BI__builtin_arm_ldrexd:
case ARM::BI__builtin_arm_ldrex:
case ARM::BI__ldrexd:
F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
break;
}
Value *LdPtr = EmitScalarExpr(E->getArg(0));
Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
"ldrexd");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
Val0 = Builder.CreateZExt(Val0, Int64Ty);
Val1 = Builder.CreateZExt(Val1, Int64Ty);
Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
}
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *PtrTy = llvm::IntegerType::get(
getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
else {
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
}
if (BuiltinID == ARM::BI__builtin_arm_strexd ||
((BuiltinID == ARM::BI__builtin_arm_stlex ||
BuiltinID == ARM::BI__builtin_arm_strex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlexd
: Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
Address LdPtr = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
}
if (BuiltinID == ARM::BI__builtin_arm_strex ||
BuiltinID == ARM::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
else {
llvm::Type *IntTy = llvm::IntegerType::get(
getLLVMContext(),
CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
}
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlex
: Intrinsic::arm_strex,
StoreAddr->getType());
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
}
switch (BuiltinID) {
case ARM::BI__iso_volatile_load8:
case ARM::BI__iso_volatile_load16:
case ARM::BI__iso_volatile_load32:
case ARM::BI__iso_volatile_load64:
return EmitISOVolatileLoad(E);
case ARM::BI__iso_volatile_store8:
case ARM::BI__iso_volatile_store16:
case ARM::BI__iso_volatile_store32:
case ARM::BI__iso_volatile_store64:
return EmitISOVolatileStore(E);
}
if (BuiltinID == ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
}
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case ARM::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::arm_crc32b; break;
case ARM::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
case ARM::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::arm_crc32h; break;
case ARM::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
case ARM::BI__builtin_arm_crc32w:
case ARM::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::arm_crc32w; break;
case ARM::BI__builtin_arm_crc32cw:
case ARM::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
// The crc32{c,}d intrinsics are implemented as two calls to the crc32{c,}w
// intrinsics, hence we need different codegen for these cases.
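// I.e. crc32{c,}d(a, b) == crc32{c,}w(crc32{c,}w(a, lo32(b)), hi32(b)).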
if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
BuiltinID == ARM::BI__builtin_arm_crc32cd) {
Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
Value *Arg1b = Builder.CreateLShr(Arg1, C1);
Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
return Builder.CreateCall(F, {Res, Arg1b});
} else {
Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
return Builder.CreateCall(F, {Arg0, Arg1});
}
}
if (BuiltinID == ARM::BI__builtin_arm_rsr ||
BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsr ||
BuiltinID == ARM::BI__builtin_arm_wsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsrp) {
bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_rsrp;
bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
BuiltinID == ARM::BI__builtin_arm_wsrp;
bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
BuiltinID == ARM::BI__builtin_arm_wsr64;
llvm::Type *ValueType;
llvm::Type *RegisterType;
if (IsPointerBuiltin) {
ValueType = VoidPtrTy;
RegisterType = Int32Ty;
} else if (Is64Bit) {
ValueType = RegisterType = Int64Ty;
} else {
ValueType = RegisterType = Int32Ty;
}
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
}
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
auto getAlignmentValue32 = [&](Address addr) -> Value* {
return Builder.getInt32(addr.getAlignment().getQuantity());
};
Address PtrOp0 = Address::invalid();
Address PtrOp1 = Address::invalid();
SmallVector<Value*, 4> Ops;
bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
for (unsigned i = 0, e = NumArgs; i != e; i++) {
if (i == 0) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v:
case NEON::BI__builtin_neon_vld1q_lane_v:
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v:
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v:
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v:
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v:
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v:
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v:
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
Ops.push_back(PtrOp0.getPointer());
continue;
}
}
if (i == 1) {
switch (BuiltinID) {
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v:
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v:
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v:
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v:
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v:
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v:
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v:
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v:
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v:
// Get the alignment for the argument in addition to the value;
// we'll use it later.
PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
Ops.push_back(PtrOp1.getPointer());
continue;
}
}
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
assert(IsConst && "Constant arg isn't actually constant?");
(void)IsConst;
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
}
}
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vget_lane_f32:
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vgetq_lane_f32:
return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
case NEON::BI__builtin_neon_vrndns_f32: {
Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Tys[] = {Arg->getType()};
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
return Builder.CreateCall(F, {Arg}, "vrndn");
}
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsha1h_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
"vsha1h");
case NEON::BI__builtin_neon_vsha1cq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
"vsha1c");
case NEON::BI__builtin_neon_vsha1pq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
"vsha1p");
case NEON::BI__builtin_neon_vsha1mq_u32:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
"vsha1m");
// The ARM _MoveToCoprocessor builtins put the input register value as
// the first argument, but the LLVM intrinsic expects it as the third one.
case ARM::BI_MoveToCoprocessor:
case ARM::BI_MoveToCoprocessor2: {
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
case ARM::BI_BitScanForward:
case ARM::BI_BitScanForward64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
case ARM::BI_BitScanReverse:
case ARM::BI_BitScanReverse64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
case ARM::BI_InterlockedAnd64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
case ARM::BI_InterlockedExchange64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
case ARM::BI_InterlockedExchangeAdd64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
case ARM::BI_InterlockedExchangeSub64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
case ARM::BI_InterlockedOr64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
case ARM::BI_InterlockedXor64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
case ARM::BI_InterlockedDecrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
case ARM::BI_InterlockedIncrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
}
// Get the last argument, which specifies the vector type.
assert(HasExtraArg);
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
if (!Arg->isIntegerConstantExpr(Result, getContext()))
return nullptr;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
// Determine the overloaded type of this builtin.
llvm::Type *Ty;
if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
Ty = FloatTy;
else
Ty = DoubleTy;
// Determine whether this is an unsigned conversion or not.
bool usgn = Result.getZExtValue() == 1;
unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
// Call the appropriate intrinsic.
Function *F = CGM.getIntrinsic(Int, Ty);
return Builder.CreateCall(F, Ops, "vcvtr");
}
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
bool usgn = Type.isUnsigned();
bool rightShift = false;
llvm::VectorType *VTy = GetNeonType(this, Type,
getTarget().hasLegalHalfType());
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
unsigned Int;
switch (BuiltinID) {
default: return nullptr;
case NEON::BI__builtin_neon_vld1q_lane_v:
// Handle 64-bit integer elements as a special case. Use shuffles of
// one-element vectors to avoid poor code for i64 in the backend.
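// E.g. for lane 1 we keep lane 0 of the existing vector as a <1 x i64>,
// load the new element as another <1 x i64>, and recombine the two with
// shuffle mask <0, 1>.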
if (VTy->getElementType()->isIntegerTy(64)) {
// Extract the other lane.
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
// Load the value as a one-element vector.
Ty = llvm::VectorType::get(VTy->getElementType(), 1);
llvm::Type *Tys[] = {Ty, Int8PtrTy};
Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
Value *Align = getAlignmentValue32(PtrOp0);
Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
// Combine them.
uint32_t Indices[] = {1 - Lane, Lane};
SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
}
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vld1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
Value *Ld = Builder.CreateLoad(PtrOp0);
return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int =
usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
1, true);
case NEON::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
1, true);
case NEON::BI__builtin_neon_vqshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
Ops, "vqshrun_n", 1, true);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
Ops, "vrecpe");
case NEON::BI__builtin_neon_vrshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
Ops, "vrshrn_n", 1, true);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v:
rightShift = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v:
Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
Ops, "vsli_n");
case NEON::BI__builtin_neon_vsra_n_v:
case NEON::BI__builtin_neon_vsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
case NEON::BI__builtin_neon_vst1q_lane_v:
// Handle 64-bit integer elements as a special case. Use a shuffle to get
// a one-element vector and avoid poor code for i64 in the backend.
if (VTy->getElementType()->isIntegerTy(64)) {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
Ops[2] = getAlignmentValue32(PtrOp0);
llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
Tys), Ops);
}
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
return St;
}
case NEON::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
Ops, "vtbl1");
case NEON::BI__builtin_neon_vtbl2_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
Ops, "vtbl2");
case NEON::BI__builtin_neon_vtbl3_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
Ops, "vtbl3");
case NEON::BI__builtin_neon_vtbl4_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
Ops, "vtbl4");
case NEON::BI__builtin_neon_vtbx1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
Ops, "vtbx1");
case NEON::BI__builtin_neon_vtbx2_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
Ops, "vtbx2");
case NEON::BI__builtin_neon_vtbx3_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
Ops, "vtbx3");
case NEON::BI__builtin_neon_vtbx4_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
Ops, "vtbx4");
}
}
static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
const CallExpr *E,
SmallVectorImpl<Value *> &Ops,
llvm::Triple::ArchType Arch) {
unsigned int Int = 0;
const char *s = nullptr;
switch (BuiltinID) {
default:
return nullptr;
case NEON::BI__builtin_neon_vtbl1_v:
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
case NEON::BI__builtin_neon_vtbl2_v:
case NEON::BI__builtin_neon_vqtbl2_v:
case NEON::BI__builtin_neon_vqtbl2q_v:
case NEON::BI__builtin_neon_vtbl3_v:
case NEON::BI__builtin_neon_vqtbl3_v:
case NEON::BI__builtin_neon_vqtbl3q_v:
case NEON::BI__builtin_neon_vtbl4_v:
case NEON::BI__builtin_neon_vqtbl4_v:
case NEON::BI__builtin_neon_vqtbl4q_v:
break;
case NEON::BI__builtin_neon_vtbx1_v:
case NEON::BI__builtin_neon_vqtbx1_v:
case NEON::BI__builtin_neon_vqtbx1q_v:
case NEON::BI__builtin_neon_vtbx2_v:
case NEON::BI__builtin_neon_vqtbx2_v:
case NEON::BI__builtin_neon_vqtbx2q_v:
case NEON::BI__builtin_neon_vtbx3_v:
case NEON::BI__builtin_neon_vqtbx3_v:
case NEON::BI__builtin_neon_vqtbx3q_v:
case NEON::BI__builtin_neon_vtbx4_v:
case NEON::BI__builtin_neon_vqtbx4_v:
case NEON::BI__builtin_neon_vqtbx4q_v:
break;
}
assert(E->getNumArgs() >= 3);
// Get the last argument, which specifies the vector type.
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs() - 1);
if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
return nullptr;
// Determine the type of this overloaded NEON intrinsic.
NeonTypeFlags Type(Result.getZExtValue());
llvm::VectorType *Ty = GetNeonType(&CGF, Type);
if (!Ty)
return nullptr;
CodeGen::CGBuilderTy &Builder = CGF.Builder;
// AArch64 scalar builtins are not overloaded: they do not have an extra
// argument that specifies the vector type, so each case must be handled
// individually.
switch (BuiltinID) {
case NEON::BI__builtin_neon_vtbl1_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
"vtbl1");
}
case NEON::BI__builtin_neon_vtbl2_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
"vtbl1");
}
case NEON::BI__builtin_neon_vtbl3_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
"vtbl2");
}
case NEON::BI__builtin_neon_vtbl4_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
"vtbl2");
}
case NEON::BI__builtin_neon_vtbx1_v: {
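// A 64-bit vtbx1 can't simply be widened to a 128-bit tbx: indices 8-15
// would then select the zero padding instead of keeping the destination
// element. So do a tbl1 lookup and merge the results by hand, using a
// compare against 8 to pick the original element wherever the index was
// out of range.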
Value *TblRes =
packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
llvm::Constant *EightV = ConstantInt::get(Ty, 8);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx2_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
"vtbx1");
}
case NEON::BI__builtin_neon_vtbx3_v: {
Value *TblRes =
packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
TwentyFourV);
CmpRes = Builder.CreateSExt(CmpRes, Ty);
Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
}
case NEON::BI__builtin_neon_vtbx4_v: {
return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
"vtbx2");
}
case NEON::BI__builtin_neon_vqtbl1_v:
case NEON::BI__builtin_neon_vqtbl1q_v:
Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
case NEON::BI__builtin_neon_vqtbl2_v:
case NEON::BI__builtin_neon_vqtbl2q_v:
Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
case NEON::BI__builtin_neon_vqtbl3_v:
case NEON::BI__builtin_neon_vqtbl3q_v:
Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
case NEON::BI__builtin_neon_vqtbl4_v:
case NEON::BI__builtin_neon_vqtbl4q_v:
Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
case NEON::BI__builtin_neon_vqtbx1_v:
case NEON::BI__builtin_neon_vqtbx1q_v:
Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
case NEON::BI__builtin_neon_vqtbx2_v:
case NEON::BI__builtin_neon_vqtbx2q_v:
Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
case NEON::BI__builtin_neon_vqtbx3_v:
case NEON::BI__builtin_neon_vqtbx3q_v:
Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
case NEON::BI__builtin_neon_vqtbx4_v:
case NEON::BI__builtin_neon_vqtbx4q_v:
Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
}
if (!Int)
return nullptr;
Function *F = CGF.CGM.getIntrinsic(Int, Ty);
return CGF.EmitNeonCall(F, Ops, s);
}
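// Wrap a scalar in lane 0 of an undef <4 x i16> vector; several of the
// AArch64 scalar NEON intrinsics handled below expect their i16 operands
// in this form.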
Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
Op = Builder.CreateBitCast(Op, Int16Ty);
Value *V = UndefValue::get(VTy);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Op = Builder.CreateInsertElement(V, Op, CI);
return Op;
}
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
case AArch64::BI__builtin_arm_nop:
HintID = 0;
break;
case AArch64::BI__builtin_arm_yield:
case AArch64::BI__yield:
HintID = 1;
break;
case AArch64::BI__builtin_arm_wfe:
case AArch64::BI__wfe:
HintID = 2;
break;
case AArch64::BI__builtin_arm_wfi:
case AArch64::BI__wfi:
HintID = 3;
break;
case AArch64::BI__builtin_arm_sev:
case AArch64::BI__sev:
HintID = 4;
break;
case AArch64::BI__builtin_arm_sevl:
case AArch64::BI__sevl:
HintID = 5;
break;
}
if (HintID != static_cast<unsigned>(-1)) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *CacheLevel = EmitScalarExpr(E->getArg(2));
Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
Value *IsData = EmitScalarExpr(E->getArg(4));
Value *Locality = nullptr;
if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
// Temporal fetch: convert the cache level to LLVM's locality value.
Locality = llvm::ConstantInt::get(Int32Ty,
-cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
} else {
// Streaming fetch.
Locality = llvm::ConstantInt::get(Int32Ty, 0);
}
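// The mapping is locality = 3 - CacheLevel, so the innermost (most
// temporal) cache level gets the highest locality value that LLVM's
// prefetch intrinsic understands.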
// FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
// PLDL3STRM or PLDL2STRM.
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
assert((getContext().getTypeSize(E->getType()) == 64) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
for (unsigned i = 0; i < 2; i++)
Ops[i] = EmitScalarExpr(E->getArg(i));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
StringRef Name = FD->getName();
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 128) {
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxp
: Intrinsic::aarch64_ldxp);
Value *LdPtr = EmitScalarExpr(E->getArg(0));
Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
"ldxp");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
Val0 = Builder.CreateZExt(Val0, Int128Ty);
Val1 = Builder.CreateZExt(Val1, Int128Ty);
Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
} else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
llvm::Type *PtrTy = llvm::IntegerType::get(
getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxp
: Intrinsic::aarch64_stxp);
llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
llvm::Value *Val = Builder.CreateLoad(Tmp);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
Int8PtrTy);
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
}
if (BuiltinID == AArch64::BI__builtin_arm_strex ||
BuiltinID == AArch64::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
QualType Ty = E->getArg(0)->getType();
llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
getContext().getTypeSize(Ty));
StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
else {
llvm::Type *IntTy = llvm::IntegerType::get(
getLLVMContext(),
CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
}
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxr
: Intrinsic::aarch64_stxr,
StoreAddr->getType());
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
}
if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
return Builder.CreateCall(F);
}
// CRC32
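// Unlike the 32-bit ARM path, AArch64 has native 64-bit CRC32X/CRC32CX
// forms, so the 64-bit variants map directly to a single intrinsic call
// with no splitting of the data operand.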
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
case AArch64::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
case AArch64::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
case AArch64::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
case AArch64::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
case AArch64::BI__builtin_arm_crc32w:
CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
case AArch64::BI__builtin_arm_crc32cw:
CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
case AArch64::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
case AArch64::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
}
if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
Function *F = CGM.getIntrinsic(CRCIntrinsicID);
llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
return Builder.CreateCall(F, {Arg0, Arg1});
}
if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsr ||
BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
BuiltinID == AArch64::BI__builtin_arm_wsrp) {
bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
BuiltinID == AArch64::BI__builtin_arm_rsrp;
bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
BuiltinID == AArch64::BI__builtin_arm_wsrp;
bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
BuiltinID != AArch64::BI__builtin_arm_wsr;
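// AArch64 system registers are always accessed as 64-bit quantities; the
// 32-bit rsr/wsr forms are narrowed or widened inside
// EmitSpecialRegisterBuiltin as needed.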
llvm::Type *ValueType;
llvm::Type *RegisterType = Int64Ty;
if (IsPointerBuiltin) {
ValueType = VoidPtrTy;
} else if (Is64Bit) {
ValueType = Int64Ty;
} else {
ValueType = Int32Ty;
}
return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
}
// Find out if any arguments are required to be integer constant
// expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
llvm::SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
} else {
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
assert(IsConst && "Constant arg isn't actually constant?");
(void)IsConst;
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
}
}
auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
if (Builtin) {
Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
assert(Result && "SISD intrinsic should have been handled");
return Result;
}
llvm::APSInt Result;
const Expr *Arg = E->getArg(E->getNumArgs()-1);
NeonTypeFlags Type(0);
if (Arg->isIntegerConstantExpr(Result, getContext()))
// Determine the type of this overloaded NEON intrinsic.
Type = NeonTypeFlags(Result.getZExtValue());
bool usgn = Type.isUnsigned();
bool quad = Type.isQuad();
// Handle non-overloaded intrinsics first.
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vabsh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
return Builder.CreateAlignedLoad(Int128Ty, Ptr,
CharUnits::fromQuantity(16));
}
case NEON::BI__builtin_neon_vstrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
}
case NEON::BI__builtin_neon_vcvts_u32_f32:
case NEON::BI__builtin_neon_vcvtd_u64_f64:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvts_s32_f32:
case NEON::BI__builtin_neon_vcvtd_s64_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
if (usgn)
return Builder.CreateFPToUI(Ops[0], InTy);
return Builder.CreateFPToSI(Ops[0], InTy);
}
case NEON::BI__builtin_neon_vcvts_f32_u32:
case NEON::BI__builtin_neon_vcvtd_f64_u64:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvts_f32_s32:
case NEON::BI__builtin_neon_vcvtd_f64_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
if (usgn)
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vcvth_f16_u16:
case NEON::BI__builtin_neon_vcvth_f16_u32:
case NEON::BI__builtin_neon_vcvth_f16_u64:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_f16_s16:
case NEON::BI__builtin_neon_vcvth_f16_s32:
case NEON::BI__builtin_neon_vcvth_f16_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
llvm::Type *FTy = HalfTy;
llvm::Type *InTy;
if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
InTy = Int64Ty;
else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
InTy = Int32Ty;
else
InTy = Int16Ty;
Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
if (usgn)
return Builder.CreateUIToFP(Ops[0], FTy);
return Builder.CreateSIToFP(Ops[0], FTy);
}
case NEON::BI__builtin_neon_vcvth_u16_f16:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s16_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
if (usgn)
return Builder.CreateFPToUI(Ops[0], Int16Ty);
return Builder.CreateFPToSI(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcvth_u32_f16:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s32_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
if (usgn)
return Builder.CreateFPToUI(Ops[0], Int32Ty);
return Builder.CreateFPToSI(Ops[0], Int32Ty);
}
case NEON::BI__builtin_neon_vcvth_u64_f16:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vcvth_s64_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
if (usgn)
return Builder.CreateFPToUI(Ops[0], Int64Ty);
return Builder.CreateFPToSI(Ops[0], Int64Ty);
}
case NEON::BI__builtin_neon_vcvtah_u16_f16:
case NEON::BI__builtin_neon_vcvtmh_u16_f16:
case NEON::BI__builtin_neon_vcvtnh_u16_f16:
case NEON::BI__builtin_neon_vcvtph_u16_f16:
case NEON::BI__builtin_neon_vcvtah_s16_f16:
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
case NEON::BI__builtin_neon_vcvtnh_s16_f16:
case NEON::BI__builtin_neon_vcvtph_s16_f16: {
unsigned Int;
llvm::Type* InTy = Int32Ty;
llvm::Type* FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(0)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvtah_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtau; break;
case NEON::BI__builtin_neon_vcvtmh_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtmu; break;
case NEON::BI__builtin_neon_vcvtnh_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtnu; break;
case NEON::BI__builtin_neon_vcvtph_u16_f16:
Int = Intrinsic::aarch64_neon_fcvtpu; break;
case NEON::BI__builtin_neon_vcvtah_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtas; break;
case NEON::BI__builtin_neon_vcvtmh_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtms; break;
case NEON::BI__builtin_neon_vcvtnh_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtns; break;
case NEON::BI__builtin_neon_vcvtph_s16_f16:
Int = Intrinsic::aarch64_neon_fcvtps; break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcaleh_f16:
case NEON::BI__builtin_neon_vcalth_f16:
case NEON::BI__builtin_neon_vcageh_f16:
case NEON::BI__builtin_neon_vcagth_f16: {
unsigned Int;
llvm::Type* InTy = Int32Ty;
llvm::Type* FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcageh_f16:
Int = Intrinsic::aarch64_neon_facge; break;
case NEON::BI__builtin_neon_vcagth_f16:
Int = Intrinsic::aarch64_neon_facgt; break;
case NEON::BI__builtin_neon_vcaleh_f16:
Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
case NEON::BI__builtin_neon_vcalth_f16:
Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcvth_n_s16_f16:
case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
unsigned Int;
llvm::Type* InTy = Int32Ty;
llvm::Type* FTy = HalfTy;
llvm::Type *Tys[2] = {InTy, FTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvth_n_s16_f16:
Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
case NEON::BI__builtin_neon_vcvth_n_u16_f16:
Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
}
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vcvth_n_f16_s16:
case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
unsigned Int;
llvm::Type* FTy = HalfTy;
llvm::Type* InTy = Int32Ty;
llvm::Type *Tys[2] = {FTy, InTy};
Ops.push_back(EmitScalarExpr(E->getArg(1)));
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vcvth_n_f16_s16:
Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
break;
case NEON::BI__builtin_neon_vcvth_n_f16_u16:
Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
Ops[0] = Builder.CreateZExt(Ops[0], InTy);
break;
}
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
}
case NEON::BI__builtin_neon_vpaddd_s64: {
llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2i64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2i64 into a scalar i64.
return Builder.CreateAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpaddd_f64: {
llvm::Type *Ty =
llvm::VectorType::get(DoubleTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f64, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2f64 into a scalar f64.
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vpadds_f32: {
llvm::Type *Ty =
llvm::VectorType::get(FloatTy, 2);
Value *Vec = EmitScalarExpr(E->getArg(0));
// The vector is v2f32, so make sure it's bitcast to that.
Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
// Pairwise addition of a v2f32 into a scalar f32.
return Builder.CreateFAdd(Op0, Op1, "vpaddd");
}
case NEON::BI__builtin_neon_vceqzd_s64:
case NEON::BI__builtin_neon_vceqzd_f64:
case NEON::BI__builtin_neon_vceqzs_f32:
case NEON::BI__builtin_neon_vceqzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
case NEON::BI__builtin_neon_vcgezd_s64:
case NEON::BI__builtin_neon_vcgezd_f64:
case NEON::BI__builtin_neon_vcgezs_f32:
case NEON::BI__builtin_neon_vcgezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
case NEON::BI__builtin_neon_vclezd_s64:
case NEON::BI__builtin_neon_vclezd_f64:
case NEON::BI__builtin_neon_vclezs_f32:
case NEON::BI__builtin_neon_vclezh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
case NEON::BI__builtin_neon_vcgtzd_s64:
case NEON::BI__builtin_neon_vcgtzd_f64:
case NEON::BI__builtin_neon_vcgtzs_f32:
case NEON::BI__builtin_neon_vcgtzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
case NEON::BI__builtin_neon_vcltzd_s64:
case NEON::BI__builtin_neon_vcltzd_f64:
case NEON::BI__builtin_neon_vcltzs_f32:
case NEON::BI__builtin_neon_vcltzh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitAArch64CompareBuiltinExpr(
Ops[0], ConvertType(E->getCallReturnType(getContext())),
ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
case NEON::BI__builtin_neon_vceqzd_u64: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
Ops[0] =
Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
}
case NEON::BI__builtin_neon_vceqd_f64:
case NEON::BI__builtin_neon_vcled_f64:
case NEON::BI__builtin_neon_vcltd_f64:
case NEON::BI__builtin_neon_vcged_f64:
case NEON::BI__builtin_neon_vcgtd_f64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqs_f32:
case NEON::BI__builtin_neon_vcles_f32:
case NEON::BI__builtin_neon_vclts_f32:
case NEON::BI__builtin_neon_vcges_f32:
case NEON::BI__builtin_neon_vcgts_f32: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqh_f16:
case NEON::BI__builtin_neon_vcleh_f16:
case NEON::BI__builtin_neon_vclth_f16:
case NEON::BI__builtin_neon_vcgeh_f16:
case NEON::BI__builtin_neon_vcgth_f16: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int16Ty, "vcmph");
}
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64:
case NEON::BI__builtin_neon_vcgtd_s64:
case NEON::BI__builtin_neon_vcgtd_u64:
case NEON::BI__builtin_neon_vcltd_s64:
case NEON::BI__builtin_neon_vcltd_u64:
case NEON::BI__builtin_neon_vcged_u64:
case NEON::BI__builtin_neon_vcged_s64:
case NEON::BI__builtin_neon_vcled_u64:
case NEON::BI__builtin_neon_vcled_s64: {
llvm::CmpInst::Predicate P;
switch (BuiltinID) {
default: llvm_unreachable("missing builtin ID in switch!");
case NEON::BI__builtin_neon_vceqd_s64:
case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
}
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
}
case NEON::BI__builtin_neon_vtstd_s64:
case NEON::BI__builtin_neon_vtstd_u64: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
llvm::Constant::getNullValue(Int64Ty));
return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
}
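// vset_lane maps directly onto IR insertelement; e.g. vsetq_lane_s32
// lowers to roughly: insertelement <4 x i32> %v, i32 %x, i32 %lane.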
case NEON::BI__builtin_neon_vset_lane_i8:
case NEON::BI__builtin_neon_vset_lane_i16:
case NEON::BI__builtin_neon_vset_lane_i32:
case NEON::BI__builtin_neon_vset_lane_i64:
case NEON::BI__builtin_neon_vset_lane_f32:
case NEON::BI__builtin_neon_vsetq_lane_i8:
case NEON::BI__builtin_neon_vsetq_lane_i16:
case NEON::BI__builtin_neon_vsetq_lane_i32:
case NEON::BI__builtin_neon_vsetq_lane_i64:
case NEON::BI__builtin_neon_vsetq_lane_f32:
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vset_lane_f64:
// The vector type needs a cast for the v1f64 variant.
Ops[1] = Builder.CreateBitCast(Ops[1],
llvm::VectorType::get(DoubleTy, 1));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
case NEON::BI__builtin_neon_vsetq_lane_f64:
// The vector type needs a cast for the v2f64 variant.
Ops[1] = Builder.CreateBitCast(Ops[1],
llvm::VectorType::get(DoubleTy, 2));
Ops.push_back(EmitScalarExpr(E->getArg(2)));
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
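// vget_lane and the scalar vdup*_lane forms collapse to an extractelement
// after bitcasting the operand to the vector type implied by the builtin
// name (the element count is fixed by the d/q register width).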
case NEON::BI__builtin_neon_vget_lane_i8:
case NEON::BI__builtin_neon_vdupb_lane_i8:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i8:
case NEON::BI__builtin_neon_vdupb_laneq_i8:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i16:
case NEON::BI__builtin_neon_vduph_lane_i16:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_i16:
case NEON::BI__builtin_neon_vduph_laneq_i16:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i32:
case NEON::BI__builtin_neon_vdups_lane_i32:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdups_lane_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdups_lane");
case NEON::BI__builtin_neon_vgetq_lane_i32:
case NEON::BI__builtin_neon_vdups_laneq_i32:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_i64:
case NEON::BI__builtin_neon_vdupd_lane_i64:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vdupd_lane_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vdupd_lane");
case NEON::BI__builtin_neon_vgetq_lane_i64:
case NEON::BI__builtin_neon_vdupd_laneq_i64:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vget_lane_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(FloatTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vget_lane_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(DoubleTy, 1));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vget_lane");
case NEON::BI__builtin_neon_vgetq_lane_f32:
case NEON::BI__builtin_neon_vdups_laneq_f32:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(FloatTy, 4));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vgetq_lane_f64:
case NEON::BI__builtin_neon_vdupd_laneq_f64:
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::VectorType::get(DoubleTy, 2));
return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
"vgetq_lane");
case NEON::BI__builtin_neon_vaddh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
case NEON::BI__builtin_neon_vsubh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
case NEON::BI__builtin_neon_vmulh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
case NEON::BI__builtin_neon_vdivh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
case NEON::BI__builtin_neon_vfmah_f16: {
Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
// The NEON intrinsic takes the accumulator first, unlike LLVM's fma.
return Builder.CreateCall(F,
{EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vfmsh_f16: {
Value *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
// The NEON intrinsic takes the accumulator first, unlike LLVM's fma.
return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
}
case NEON::BI__builtin_neon_vaddd_s64:
case NEON::BI__builtin_neon_vaddd_u64:
return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
case NEON::BI__builtin_neon_vsubd_s64:
case NEON::BI__builtin_neon_vsubd_u64:
return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
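// Saturating doubling multiply-accumulate: only the vector form of
// sqdmull exists, so wrap the i16 scalars into v4i16 vectors,
// multiply-long to v4i32, extract lane 0, and then saturating
// add/subtract into the i32 accumulator.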
case NEON::BI__builtin_neon_vqdmlalh_s16:
case NEON::BI__builtin_neon_vqdmlslh_s16: {
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqshlud_n_s64: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
Ops, "vqshlu_n");
}
case NEON::BI__builtin_neon_vqshld_n_u64:
case NEON::BI__builtin_neon_vqshld_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
? Intrinsic::aarch64_neon_uqshl
: Intrinsic::aarch64_neon_sqshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
}
case NEON::BI__builtin_neon_vrshrd_n_u64:
case NEON::BI__builtin_neon_vrshrd_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
? Intrinsic::aarch64_neon_urshl
: Intrinsic::aarch64_neon_srshl;
Ops.push_back(EmitScalarExpr(E->getArg(1)));
int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
Ops[1] = ConstantInt::get(Int64Ty, -SV);
return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
}
case NEON::BI__builtin_neon_vrsrad_n_u64:
case NEON::BI__builtin_neon_vrsrad_n_s64: {
unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
? Intrinsic::aarch64_neon_urshl
: Intrinsic::aarch64_neon_srshl;
Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
{Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
}
case NEON::BI__builtin_neon_vshld_n_s64:
case NEON::BI__builtin_neon_vshld_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
return Builder.CreateShl(
Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
}
case NEON::BI__builtin_neon_vshrd_n_s64: {
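// An ashr of an i64 by 64 is a poison value in LLVM IR, and for
// arithmetic shifts a shift by 64 gives the same result as a shift by
// 63 (the sign bit fills the value), so clamp the immediate to 63.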
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
return Builder.CreateAShr(
Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
Amt->getZExtValue())),
"shrd_n");
}
case NEON::BI__builtin_neon_vshrd_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
uint64_t ShiftAmt = Amt->getZExtValue();
// Right-shifting an unsigned value by its size yields 0.
if (ShiftAmt == 64)
return ConstantInt::get(Int64Ty, 0);
return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
"shrd_n");
}
case NEON::BI__builtin_neon_vsrad_n_s64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
Ops[1] = Builder.CreateAShr(
Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
Amt->getZExtValue())),
"shrd_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vsrad_n_u64: {
llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
uint64_t ShiftAmt = Amt->getZExtValue();
// Right-shifting an unsigned value by its size yields 0.
// As Op + 0 = Op, return Ops[0] directly.
if (ShiftAmt == 64)
return Ops[0];
Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
"shrd_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
}
case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
"lane");
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(vectorWrapScalar16(Ops[1]));
ProductOps.push_back(vectorWrapScalar16(Ops[2]));
llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
ProductOps, "vqdmlXl");
Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
Ops.pop_back();
unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_s32:
case NEON::BI__builtin_neon_vqdmlsls_s32: {
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(Ops[1]);
ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
ProductOps, "vqdmlXl");
unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
}
case NEON::BI__builtin_neon_vqdmlals_lane_s32:
case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
"lane");
SmallVector<Value *, 2> ProductOps;
ProductOps.push_back(Ops[1]);
ProductOps.push_back(Ops[2]);
Ops[1] =
EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
ProductOps, "vqdmlXl");
Ops.pop_back();
unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
? Intrinsic::aarch64_neon_sqadd
: Intrinsic::aarch64_neon_sqsub;
return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
}
}
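// End of the scalar special cases. The remaining builtins operate on
// whole vectors, so resolve the NEON type flags to an LLVM vector type
// first; a null type means the combination is not supported here.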
llvm::VectorType *VTy = GetNeonType(this, Type);
llvm::Type *Ty = VTy;
if (!Ty)
return nullptr;
// Not every intrinsic handled by the common code works for AArch64 yet,
// so defer to the common path only if the builtin appears in our map.
Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
AArch64SIMDIntrinsicsProvenSorted);
if (Builtin)
return EmitCommonNeonBuiltinExpr(
Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
Builtin->NameHint, Builtin->TypeModifier, E, Ops,
/*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
return V;
unsigned Int;
switch (BuiltinID) {
default: return nullptr;
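// vbsl is a bitwise select: on the integer view of the operands it
// computes (mask & a) | (~mask & b).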
case NEON::BI__builtin_neon_vbsl_v:
case NEON::BI__builtin_neon_vbslq_v: {
llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
return Builder.CreateBitCast(Ops[0], Ty);
}
case NEON::BI__builtin_neon_vfma_lane_v:
case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
// The ARM builtins (and instructions) have the addend as the first
// operand, but the 'fma' intrinsics have it last. Swap it around here.
Value *Addend = Ops[0];
Value *Multiplicand = Ops[1];
Value *LaneSource = Ops[2];
Ops[0] = Multiplicand;
Ops[1] = LaneSource;
Ops[2] = Addend;
// Now adjust things to handle the lane access.
llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
VTy;
llvm::Constant *cst = cast<Constant>(Ops[3]);
Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
Ops.pop_back();
Int = Intrinsic::fma;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
}
case NEON::BI__builtin_neon_vfma_laneq_v: {
llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
// A v1f64 fma is mapped to the NEON scalar f64 fma.
if (VTy && VTy->getElementType() == DoubleTy) {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
NeonTypeFlags(NeonTypeFlags::Float64, false, true));
Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
return Builder.CreateBitCast(Result, Ty);
}
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
VTy->getNumElements() * 2);
Ops[2] = Builder.CreateBitCast(Ops[2], STy);
Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
cast<ConstantInt>(Ops[3]));
Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmaq_laneq_v: {
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
}
case NEON::BI__builtin_neon_vfmah_lane_f16:
case NEON::BI__builtin_neon_vfmas_lane_f32:
case NEON::BI__builtin_neon_vfmah_laneq_f16:
case NEON::BI__builtin_neon_vfmas_laneq_f32:
case NEON::BI__builtin_neon_vfmad_lane_f64:
case NEON::BI__builtin_neon_vfmad_laneq_f64: {
Ops.push_back(EmitScalarExpr(E->getArg(3)));
llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
}
case NEON::BI__builtin_neon_vmull_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
case NEON::BI__builtin_neon_vmax_v:
case NEON::BI__builtin_neon_vmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
case NEON::BI__builtin_neon_vmaxh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fmax;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
}
case NEON::BI__builtin_neon_vmin_v:
case NEON::BI__builtin_neon_vminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
case NEON::BI__builtin_neon_vminh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fmin;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
}
case NEON::BI__builtin_neon_vabd_v:
case NEON::BI__builtin_neon_vabdq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
case NEON::BI__builtin_neon_vpadal_v:
case NEON::BI__builtin_neon_vpadalq_v: {
unsigned ArgElts = VTy->getNumElements();
llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
unsigned BitWidth = EltTy->getBitWidth();
llvm::Type *ArgTy = llvm::VectorType::get(
llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
llvm::Type* Tys[2] = { VTy, ArgTy };
Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
SmallVector<llvm::Value*, 1> TmpOps;
TmpOps.push_back(Ops[1]);
Function *F = CGM.getIntrinsic(Int, Tys);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
return Builder.CreateAdd(tmp, addend);
}
case NEON::BI__builtin_neon_vpmin_v:
case NEON::BI__builtin_neon_vpminq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
case NEON::BI__builtin_neon_vpmax_v:
case NEON::BI__builtin_neon_vpmaxq_v:
// FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
case NEON::BI__builtin_neon_vminnm_v:
case NEON::BI__builtin_neon_vminnmq_v:
Int = Intrinsic::aarch64_neon_fminnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
case NEON::BI__builtin_neon_vminnmh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fminnm;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
case NEON::BI__builtin_neon_vmaxnm_v:
case NEON::BI__builtin_neon_vmaxnmq_v:
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vmaxnmh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Int = Intrinsic::aarch64_neon_fmaxnm;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
case NEON::BI__builtin_neon_vrecpss_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
Ops, "vrecps");
}
case NEON::BI__builtin_neon_vrecpsd_f64:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
Ops, "vrecps");
case NEON::BI__builtin_neon_vrecpsh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(1)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
Ops, "vrecps");
case NEON::BI__builtin_neon_vqshrun_n_v:
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
case NEON::BI__builtin_neon_vqrshrun_n_v:
Int = Intrinsic::aarch64_neon_sqrshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
case NEON::BI__builtin_neon_vrshrn_n_v:
Int = Intrinsic::aarch64_neon_rshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
case NEON::BI__builtin_neon_vqrshrn_n_v:
Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrndah_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrnda_v:
case NEON::BI__builtin_neon_vrndaq_v: {
Int = Intrinsic::round;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
}
case NEON::BI__builtin_neon_vrndih_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::nearbyint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
}
case NEON::BI__builtin_neon_vrndmh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndm_v:
case NEON::BI__builtin_neon_vrndmq_v: {
Int = Intrinsic::floor;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
}
case NEON::BI__builtin_neon_vrndnh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frintn;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndn_v:
case NEON::BI__builtin_neon_vrndnq_v: {
Int = Intrinsic::aarch64_neon_frintn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndns_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::aarch64_neon_frintn;
return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndph_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndp_v:
case NEON::BI__builtin_neon_vrndpq_v: {
Int = Intrinsic::ceil;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
}
case NEON::BI__builtin_neon_vrndxh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndx_v:
case NEON::BI__builtin_neon_vrndxq_v: {
Int = Intrinsic::rint;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
}
case NEON::BI__builtin_neon_vrndh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
Int = Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
}
case NEON::BI__builtin_neon_vcvt_f64_v:
case NEON::BI__builtin_neon_vcvtq_f64_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
: Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
case NEON::BI__builtin_neon_vcvt_f64_f32: {
assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
"unexpected vcvt_f64_f32 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_f32_f64: {
assert(Type.getEltType() == NeonTypeFlags::Float32 &&
"unexpected vcvt_f32_f64 builtin");
NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
}
case NEON::BI__builtin_neon_vcvt_s32_v:
case NEON::BI__builtin_neon_vcvt_u32_v:
case NEON::BI__builtin_neon_vcvt_s64_v:
case NEON::BI__builtin_neon_vcvt_u64_v:
case NEON::BI__builtin_neon_vcvt_s16_v:
case NEON::BI__builtin_neon_vcvt_u16_v:
case NEON::BI__builtin_neon_vcvtq_s32_v:
case NEON::BI__builtin_neon_vcvtq_u32_v:
case NEON::BI__builtin_neon_vcvtq_s64_v:
case NEON::BI__builtin_neon_vcvtq_u64_v:
case NEON::BI__builtin_neon_vcvtq_s16_v:
case NEON::BI__builtin_neon_vcvtq_u16_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
if (usgn)
return Builder.CreateFPToUI(Ops[0], Ty);
return Builder.CreateFPToSI(Ops[0], Ty);
}
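// The vcvta/vcvtm/vcvtn/vcvtp conversions carry an explicit rounding
// mode (away from zero, toward -inf, to nearest even, toward +inf)
// that plain fptosi/fptoui cannot express, so they lower to the
// fcvt[amnp][su] target intrinsics instead.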
case NEON::BI__builtin_neon_vcvta_s16_v:
case NEON::BI__builtin_neon_vcvta_u16_v:
case NEON::BI__builtin_neon_vcvta_s32_v:
case NEON::BI__builtin_neon_vcvtaq_s16_v:
case NEON::BI__builtin_neon_vcvtaq_s32_v:
case NEON::BI__builtin_neon_vcvta_u32_v:
case NEON::BI__builtin_neon_vcvtaq_u16_v:
case NEON::BI__builtin_neon_vcvtaq_u32_v:
case NEON::BI__builtin_neon_vcvta_s64_v:
case NEON::BI__builtin_neon_vcvtaq_s64_v:
case NEON::BI__builtin_neon_vcvta_u64_v:
case NEON::BI__builtin_neon_vcvtaq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
}
case NEON::BI__builtin_neon_vcvtm_s16_v:
case NEON::BI__builtin_neon_vcvtm_s32_v:
case NEON::BI__builtin_neon_vcvtmq_s16_v:
case NEON::BI__builtin_neon_vcvtmq_s32_v:
case NEON::BI__builtin_neon_vcvtm_u16_v:
case NEON::BI__builtin_neon_vcvtm_u32_v:
case NEON::BI__builtin_neon_vcvtmq_u16_v:
case NEON::BI__builtin_neon_vcvtmq_u32_v:
case NEON::BI__builtin_neon_vcvtm_s64_v:
case NEON::BI__builtin_neon_vcvtmq_s64_v:
case NEON::BI__builtin_neon_vcvtm_u64_v:
case NEON::BI__builtin_neon_vcvtmq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
}
case NEON::BI__builtin_neon_vcvtn_s16_v:
case NEON::BI__builtin_neon_vcvtn_s32_v:
case NEON::BI__builtin_neon_vcvtnq_s16_v:
case NEON::BI__builtin_neon_vcvtnq_s32_v:
case NEON::BI__builtin_neon_vcvtn_u16_v:
case NEON::BI__builtin_neon_vcvtn_u32_v:
case NEON::BI__builtin_neon_vcvtnq_u16_v:
case NEON::BI__builtin_neon_vcvtnq_u32_v:
case NEON::BI__builtin_neon_vcvtn_s64_v:
case NEON::BI__builtin_neon_vcvtnq_s64_v:
case NEON::BI__builtin_neon_vcvtn_u64_v:
case NEON::BI__builtin_neon_vcvtnq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
}
case NEON::BI__builtin_neon_vcvtp_s16_v:
case NEON::BI__builtin_neon_vcvtp_s32_v:
case NEON::BI__builtin_neon_vcvtpq_s16_v:
case NEON::BI__builtin_neon_vcvtpq_s32_v:
case NEON::BI__builtin_neon_vcvtp_u16_v:
case NEON::BI__builtin_neon_vcvtp_u32_v:
case NEON::BI__builtin_neon_vcvtpq_u16_v:
case NEON::BI__builtin_neon_vcvtpq_u32_v:
case NEON::BI__builtin_neon_vcvtp_s64_v:
case NEON::BI__builtin_neon_vcvtpq_s64_v:
case NEON::BI__builtin_neon_vcvtp_u64_v:
case NEON::BI__builtin_neon_vcvtpq_u64_v: {
Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
}
case NEON::BI__builtin_neon_vmulx_v:
case NEON::BI__builtin_neon_vmulxq_v: {
Int = Intrinsic::aarch64_neon_fmulx;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
}
case NEON::BI__builtin_neon_vmulxh_lane_f16:
case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
// vmulxh_lane is mapped to the NEON scalar fmulx after extracting the
// lane element from the vector operand.
Ops.push_back(EmitScalarExpr(E->getArg(2)));
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Ops.pop_back();
Int = Intrinsic::aarch64_neon_fmulx;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
}
case NEON::BI__builtin_neon_vmul_lane_v:
case NEON::BI__builtin_neon_vmul_laneq_v: {
// A v1f64 vmul_lane is mapped to a NEON scalar multiply on the extracted lane.
bool Quad = false;
if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
Quad = true;
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
llvm::Type *VTy = GetNeonType(this,
NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
return Builder.CreateBitCast(Result, Ty);
}
case NEON::BI__builtin_neon_vnegd_s64:
return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
case NEON::BI__builtin_neon_vnegh_f16:
return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
case NEON::BI__builtin_neon_vpmaxnm_v:
case NEON::BI__builtin_neon_vpmaxnmq_v: {
Int = Intrinsic::aarch64_neon_fmaxnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
}
case NEON::BI__builtin_neon_vpminnm_v:
case NEON::BI__builtin_neon_vpminnmq_v: {
Int = Intrinsic::aarch64_neon_fminnmp;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
}
case NEON::BI__builtin_neon_vsqrth_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Int = Intrinsic::sqrt;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vsqrt_v:
case NEON::BI__builtin_neon_vsqrtq_v: {
Int = Intrinsic::sqrt;
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
}
case NEON::BI__builtin_neon_vrbit_v:
case NEON::BI__builtin_neon_vrbitq_v: {
Int = Intrinsic::aarch64_neon_rbit;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
}
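// Across-vector reductions: the aarch64 [su](add|max|min)v intrinsics
// return i32 regardless of element width, so the result is truncated
// back to the element type (a no-op for the f16 variants).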
case NEON::BI__builtin_neon_vaddv_u8:
// FIXME: These are handled by the AArch64 scalar code.
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vaddv_u16:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddv_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddvq_u8:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s8: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vaddvq_u16:
usgn = true;
LLVM_FALLTHROUGH;
case NEON::BI__builtin_neon_vaddvq_s16: {
Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxv_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxvq_u8: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxvq_u16: {
Int = Intrinsic::aarch64_neon_umaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxv_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxvq_s8: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vmaxvq_s16: {
Int = Intrinsic::aarch64_neon_smaxv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vmaxv_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmaxvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminv_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminv_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminvq_u8: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminvq_u16: {
Int = Intrinsic::aarch64_neon_uminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminv_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminv_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminvq_s8: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int8Ty);
}
case NEON::BI__builtin_neon_vminvq_s16: {
Int = Intrinsic::aarch64_neon_sminv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vminv_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminvq_f16: {
Int = Intrinsic::aarch64_neon_fminv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmaxnmv_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmaxnmvq_f16: {
Int = Intrinsic::aarch64_neon_fmaxnmv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminnmv_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vminnmvq_f16: {
Int = Intrinsic::aarch64_neon_fminnmv;
Ty = HalfTy;
VTy = llvm::VectorType::get(HalfTy, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
return Builder.CreateTrunc(Ops[0], HalfTy);
}
case NEON::BI__builtin_neon_vmul_n_f64: {
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
return Builder.CreateFMul(Ops[0], RHS);
}
case NEON::BI__builtin_neon_vaddlv_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_u8: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_u16: {
Int = Intrinsic::aarch64_neon_uaddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlv_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlv_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 4);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vaddlvq_s8: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int8Ty, 16);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
return Builder.CreateTrunc(Ops[0], Int16Ty);
}
case NEON::BI__builtin_neon_vaddlvq_s16: {
Int = Intrinsic::aarch64_neon_saddlv;
Ty = Int32Ty;
VTy = llvm::VectorType::get(Int16Ty, 8);
llvm::Type *Tys[2] = { Ty, VTy };
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
}
case NEON::BI__builtin_neon_vsri_n_v:
case NEON::BI__builtin_neon_vsriq_n_v: {
Int = Intrinsic::aarch64_neon_vsri;
llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
return EmitNeonCall(Intrin, Ops, "vsri_n");
}
case NEON::BI__builtin_neon_vsli_n_v:
case NEON::BI__builtin_neon_vsliq_n_v: {
Int = Intrinsic::aarch64_neon_vsli;
llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
return EmitNeonCall(Intrin, Ops, "vsli_n");
}
case NEON::BI__builtin_neon_vsra_n_v:
case NEON::BI__builtin_neon_vsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v: {
Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
SmallVector<llvm::Value*,2> TmpOps;
TmpOps.push_back(Ops[1]);
TmpOps.push_back(Ops[2]);
Function* F = CGM.getIntrinsic(Int, Ty);
llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
return Builder.CreateAdd(Ops[0], tmp);
}
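// Loads and stores. vld1/vst1 become plain aligned vector loads and
// stores; the structured vldN/vstN forms call the aarch64.neon.ldN/stN
// intrinsics, with the vldN results written through the sret-style
// destination pointer in Ops[0].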
case NEON::BI__builtin_neon_vld1_v:
case NEON::BI__builtin_neon_vld1q_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
auto Alignment = CharUnits::fromQuantity(
BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
}
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
case NEON::BI__builtin_neon_vld1_lane_v:
case NEON::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
auto Alignment = CharUnits::fromQuantity(
BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
Ops[0] =
Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
}
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
auto Alignment = CharUnits::fromQuantity(
BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
Ops[0] =
Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
}
case NEON::BI__builtin_neon_vst1_lane_v:
case NEON::BI__builtin_neon_vst1q_lane_v:
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
return Builder.CreateDefaultAlignedStore(Ops[1],
Builder.CreateBitCast(Ops[0], Ty));
case NEON::BI__builtin_neon_vld2_v:
case NEON::BI__builtin_neon_vld2q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_v:
case NEON::BI__builtin_neon_vld3q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_v:
case NEON::BI__builtin_neon_vld4q_v: {
llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_dup_v:
case NEON::BI__builtin_neon_vld2q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_dup_v:
case NEON::BI__builtin_neon_vld3q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_dup_v:
case NEON::BI__builtin_neon_vld4q_dup_v: {
llvm::Type *PTy =
llvm::PointerType::getUnqual(VTy->getElementType());
Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
llvm::Type *Tys[2] = { VTy, PTy };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
Ops[0] = Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld2_lane_v:
case NEON::BI__builtin_neon_vld2q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
Ops.push_back(Ops[1]);
Ops.erase(Ops.begin()+1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld3_lane_v:
case NEON::BI__builtin_neon_vld3q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
Ops.push_back(Ops[1]);
Ops.erase(Ops.begin()+1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case NEON::BI__builtin_neon_vld4_lane_v:
case NEON::BI__builtin_neon_vld4q_lane_v: {
llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
Ops.push_back(Ops[1]);
Ops.erase(Ops.begin()+1);
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
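// The stN intrinsics take the destination pointer last, so rotate Ops:
// push the pointer (originally Ops[0]) to the back and drop it from the
// front before emitting the call.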
case NEON::BI__builtin_neon_vst2_v:
case NEON::BI__builtin_neon_vst2q_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst2_lane_v:
case NEON::BI__builtin_neon_vst2q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_v:
case NEON::BI__builtin_neon_vst3q_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst3_lane_v:
case NEON::BI__builtin_neon_vst3q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_v:
case NEON::BI__builtin_neon_vst4q_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
Ops, "");
}
case NEON::BI__builtin_neon_vst4_lane_v:
case NEON::BI__builtin_neon_vst4q_lane_v: {
Ops.push_back(Ops[0]);
Ops.erase(Ops.begin());
Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
Ops, "");
}
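// vtrn/vuzp/vzip have no single intrinsic; each is open-coded as two
// shufflevector instructions whose results are stored into the two
// halves of the result object.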
case NEON::BI__builtin_neon_vtrn_v:
case NEON::BI__builtin_neon_vtrnq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<uint32_t, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back(i+vi);
Indices.push_back(i+e+vi);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vuzp_v:
case NEON::BI__builtin_neon_vuzpq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<uint32_t, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
Indices.push_back(2*i+vi);
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Value *SV = nullptr;
for (unsigned vi = 0; vi != 2; ++vi) {
SmallVector<uint32_t, 16> Indices;
for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
Indices.push_back((i + vi*e) >> 1);
Indices.push_back(((i + vi*e) >> 1)+e);
}
Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
SV = Builder.CreateDefaultAlignedStore(SV, Addr);
}
return SV;
}
case NEON::BI__builtin_neon_vqtbl1q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
Ops, "vtbl1");
}
case NEON::BI__builtin_neon_vqtbl2q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
Ops, "vtbl2");
}
case NEON::BI__builtin_neon_vqtbl3q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
Ops, "vtbl3");
}
case NEON::BI__builtin_neon_vqtbl4q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
Ops, "vtbl4");
}
case NEON::BI__builtin_neon_vqtbx1q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
Ops, "vtbx1");
}
case NEON::BI__builtin_neon_vqtbx2q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
Ops, "vtbx2");
}
case NEON::BI__builtin_neon_vqtbx3q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
Ops, "vtbx3");
}
case NEON::BI__builtin_neon_vqtbx4q_v: {
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
Ops, "vtbx4");
}
case NEON::BI__builtin_neon_vsqadd_v:
case NEON::BI__builtin_neon_vsqaddq_v: {
Int = Intrinsic::aarch64_neon_usqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
}
case NEON::BI__builtin_neon_vuqadd_v:
case NEON::BI__builtin_neon_vuqaddq_v: {
Int = Intrinsic::aarch64_neon_suqadd;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
}
case AArch64::BI__iso_volatile_load8:
case AArch64::BI__iso_volatile_load16:
case AArch64::BI__iso_volatile_load32:
case AArch64::BI__iso_volatile_load64:
return EmitISOVolatileLoad(E);
case AArch64::BI__iso_volatile_store8:
case AArch64::BI__iso_volatile_store16:
case AArch64::BI__iso_volatile_store32:
case AArch64::BI__iso_volatile_store64:
return EmitISOVolatileStore(E);
case AArch64::BI_BitScanForward:
case AArch64::BI_BitScanForward64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
case AArch64::BI_BitScanReverse:
case AArch64::BI_BitScanReverse64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
case AArch64::BI_InterlockedAnd64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
case AArch64::BI_InterlockedExchange64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
case AArch64::BI_InterlockedExchangeAdd64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
case AArch64::BI_InterlockedExchangeSub64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
case AArch64::BI_InterlockedOr64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
case AArch64::BI_InterlockedXor64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
case AArch64::BI_InterlockedDecrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
case AArch64::BI_InterlockedIncrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
}
}
llvm::Value *CodeGenFunction::
BuildVector(ArrayRef<llvm::Value*> Ops) {
assert((Ops.size() & (Ops.size() - 1)) == 0 &&
"Not a power-of-two sized vector!");
bool AllConstants = true;
for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
AllConstants &= isa<Constant>(Ops[i]);
// If this is a constant vector, create a ConstantVector.
if (AllConstants) {
SmallVector<llvm::Constant*, 16> CstOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
CstOps.push_back(cast<Constant>(Ops[i]));
return llvm::ConstantVector::get(CstOps);
}
// Otherwise, insertelement the values to build the vector.
Value *Result =
llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
return Result;
}
// Convert the mask from an integer type to a vector of i1.
static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
unsigned NumElts) {
llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
cast<IntegerType>(Mask->getType())->getBitWidth());
Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
// If we have fewer than 8 elements, the starting mask was an i8 and we need
// to extract down to the right number of elements.
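// For example, with NumElts == 4 the i8 mask is first bitcast to <8 x i1>,
// and the shuffle below keeps only lanes 0..3 to form the <4 x i1> mask.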
if (NumElts < 8) {
uint32_t Indices[4];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
makeArrayRef(Indices, NumElts),
"extract");
}
return MaskVec;
}
static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops,
unsigned Align) {
// Cast the pointer to the right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());
return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
}
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops, unsigned Align) {
// Cast the pointer to the right type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(Ops[1]->getType()));
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
Ops[1]->getType()->getVectorNumElements());
return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
llvm::Type *ResultTy = Ops[1]->getType();
llvm::Type *PtrTy = ResultTy->getVectorElementType();
// Cast the pointer to the element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
ResultTy->getVectorNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
ResultTy);
return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
}
static Value *EmitX86CompressStore(CodeGenFunction &CGF,
ArrayRef<Value *> Ops) {
llvm::Type *ResultTy = Ops[1]->getType();
llvm::Type *PtrTy = ResultTy->getVectorElementType();
// Cast the pointer to the element type.
Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
llvm::PointerType::getUnqual(PtrTy));
Value *MaskVec = getMaskVecValue(CGF, Ops[2],
ResultTy->getVectorNumElements());
llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
ResultTy);
return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
}
static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
unsigned NumElts, ArrayRef<Value *> Ops,
bool InvertLHS = false) {
Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
if (InvertLHS)
LHS = CGF.Builder.CreateNot(LHS);
return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
CGF.Builder.getIntNTy(std::max(NumElts, 8U)));
}
static Value *EmitX86Select(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
// If the mask is all ones, just return the first argument.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
return Op0;
Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
// If the mask is all ones, just return the first argument.
if (const auto *C = dyn_cast<Constant>(Mask))
if (C->isAllOnesValue())
return Op0;
llvm::VectorType *MaskTy =
llvm::VectorType::get(CGF.Builder.getInt1Ty(),
Mask->getType()->getIntegerBitWidth());
Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
return CGF.Builder.CreateSelect(Mask, Op0, Op1);
}
static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
unsigned NumElts, Value *MaskIn) {
if (MaskIn) {
const auto *C = dyn_cast<Constant>(MaskIn);
if (!C || !C->isAllOnesValue())
Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
}
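// The compare result is padded out to at least 8 lanes so it can be bitcast
// to an integer mask type below; e.g. for NumElts == 4 the <4 x i1> value is
// widened to <8 x i1>, with the upper four lanes taken from the null vector.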
if (NumElts < 8) {
uint32_t Indices[8];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
for (unsigned i = NumElts; i != 8; ++i)
Indices[i] = i % NumElts + NumElts;
Cmp = CGF.Builder.CreateShuffleVector(
Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
}
return CGF.Builder.CreateBitCast(Cmp,
IntegerType::get(CGF.getLLVMContext(),
std::max(NumElts, 8U)));
}
static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
bool Signed, ArrayRef<Value *> Ops) {
assert((Ops.size() == 2 || Ops.size() == 4) &&
"Unexpected number of arguments");
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
Value *Cmp;
if (CC == 3) {
Cmp = Constant::getNullValue(
llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else if (CC == 7) {
Cmp = Constant::getAllOnesValue(
llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
} else {
ICmpInst::Predicate Pred;
switch (CC) {
default: llvm_unreachable("Unknown condition code");
case 0: Pred = ICmpInst::ICMP_EQ; break;
case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
case 4: Pred = ICmpInst::ICMP_NE; break;
case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
}
Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
}
Value *MaskIn = nullptr;
if (Ops.size() == 4)
MaskIn = Ops[3];
return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
}
static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
Value *Zero = Constant::getNullValue(In->getType());
return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}
static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
Value *Zero = llvm::Constant::getNullValue(Ty);
Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
return Res;
}
static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
ArrayRef<Value *> Ops) {
assert(Ops.size() == 2);
Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
return Res;
}
// Lowers X86 FMA intrinsics to IR.
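// For example, __builtin_ia32_vfmaddps512_mask with the default rounding mode
// (_MM_FROUND_CUR_DIRECTION == 4) lowers to llvm.fma.v16f32 plus a select on
// the mask; any other rounding mode keeps the x86.avx512.vfmadd.ps.512
// intrinsic, as the code below shows.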
static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
unsigned BuiltinID, bool IsAddSub) {
bool Subtract = false;
Intrinsic::ID IID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
default: break;
case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
break;
case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
Subtract = true;
LLVM_FALLTHROUGH;
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
break;
}
Value *A = Ops[0];
Value *B = Ops[1];
Value *C = Ops[2];
if (Subtract)
C = CGF.Builder.CreateFNeg(C);
Value *Res;
// Lower to the generic llvm.fma intrinsic only for _MM_FROUND_CUR_DIRECTION/4
// (no explicit rounding); any other rounding mode keeps the target intrinsic.
if (IID != Intrinsic::not_intrinsic &&
cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
Function *Intr = CGF.CGM.getIntrinsic(IID);
Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
} else {
llvm::Type *Ty = A->getType();
Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
if (IsAddSub) {
// Negate C for the even elements by computing an FMSub result and blending
// it in with a shuffle.
unsigned NumElts = Ty->getVectorNumElements();
SmallVector<uint32_t, 16> Indices(NumElts);
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + (i % 2) * NumElts;
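// e.g. NumElts == 4 gives Indices = {0, 5, 2, 7}: even lanes are taken from
// the FMSub value computed below, odd lanes from the FMAdd result in Res.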
Value *NegC = CGF.Builder.CreateFNeg(C);
Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
}
}
// Handle any required masking.
Value *MaskFalseVal = nullptr;
switch (BuiltinID) {
case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
MaskFalseVal = Ops[0];
break;
case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
break;
case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
MaskFalseVal = Ops[2];
break;
}
if (MaskFalseVal)
return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
return Res;
}
static Value *
EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
bool NegAcc = false) {
unsigned Rnd = 4;
if (Ops.size() > 4)
Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
if (NegAcc)
Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
Value *Res;
if (Rnd != 4) {
Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
Intrinsic::x86_avx512_vfmadd_f32 :
Intrinsic::x86_avx512_vfmadd_f64;
Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
{Ops[0], Ops[1], Ops[2], Ops[4]});
} else {
Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
}
// If we have more than 3 arguments, we need to do masking.
if (Ops.size() > 3) {
Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
: Ops[PTIdx];
// If we negated the accumulator and it is the PassThru value, we need to
// bypass the negate. Conveniently, Upper should be the same thing in this
// case.
if (NegAcc && PTIdx == 2)
PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
}
return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
}
static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
// Arguments have a vXi32 type so cast to vXi64.
Ty = llvm::VectorType::get(CGF.Int64Ty,
Ty->getPrimitiveSizeInBits() / 64);
Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
if (IsSigned) {
// Shift left then arithmetic shift right.
Constant *ShiftAmt = ConstantInt::get(Ty, 32);
LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
} else {
// Clear the upper bits.
Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
LHS = CGF.Builder.CreateAnd(LHS, Mask);
RHS = CGF.Builder.CreateAnd(RHS, Mask);
}
return CGF.Builder.CreateMul(LHS, RHS);
}
// Emit a masked pternlog intrinsic. This only exists because the header has to
// use a macro and we aren't able to pass the input argument to a pternlog
// builtin and a select builtin without evaluating it twice.
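// Per the lowering below, the masked form selects, per lane, between the
// imm-encoded boolean function of (Ops[0], Ops[1], Ops[2]) and the passthru
// value (Ops[0] for merge masking, zero for zero masking).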
static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
ArrayRef<Value *> Ops) {
llvm::Type *Ty = Ops[0]->getType();
unsigned VecWidth = Ty->getPrimitiveSizeInBits();
unsigned EltWidth = Ty->getScalarSizeInBits();
Intrinsic::ID IID;
if (VecWidth == 128 && EltWidth == 32)
IID = Intrinsic::x86_avx512_pternlog_d_128;
else if (VecWidth == 256 && EltWidth == 32)
IID = Intrinsic::x86_avx512_pternlog_d_256;
else if (VecWidth == 512 && EltWidth == 32)
IID = Intrinsic::x86_avx512_pternlog_d_512;
else if (VecWidth == 128 && EltWidth == 64)
IID = Intrinsic::x86_avx512_pternlog_q_128;
else if (VecWidth == 256 && EltWidth == 64)
IID = Intrinsic::x86_avx512_pternlog_q_256;
else if (VecWidth == 512 && EltWidth == 64)
IID = Intrinsic::x86_avx512_pternlog_q_512;
else
llvm_unreachable("Unexpected intrinsic");
Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
Ops.drop_back());
Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
}
static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
llvm::Type *DstTy) {
unsigned NumberOfElements = DstTy->getVectorNumElements();
Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
return EmitX86CpuIs(CPUStr);
}
Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
llvm::Type *Int32Ty = Builder.getInt32Ty();
// Matching the struct layout from the compiler-rt/libgcc structure that is
// filled in:
// unsigned int __cpu_vendor;
// unsigned int __cpu_type;
// unsigned int __cpu_subtype;
// unsigned int __cpu_features[1];
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
llvm::ArrayType::get(Int32Ty, 1));
// Grab the global __cpu_model.
llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
// Calculate the index needed to access the correct field based on the
// range. Also adjust the expected value.
unsigned Index;
unsigned Value;
std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
#define X86_VENDOR(ENUM, STRING) \
.Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \
.Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) \
.Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) \
.Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
#include "llvm/Support/X86TargetParser.def"
.Default({0, 0});
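// For example, CPUStr == "amd" maps to {0u, VENDOR_AMD}: field 0
// (__cpu_vendor) is loaded below and compared against the VENDOR_AMD value.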
assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
// Grab the appropriate field from __cpu_model.
llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, Index)};
llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
// Check the value of the field against the requested value.
return Builder.CreateICmpEQ(CpuValue,
llvm::ConstantInt::get(Int32Ty, Value));
}
Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
return EmitX86CpuSupports(FeatureStr);
}
uint32_t
CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
// Processor features and mapping to processor feature value.
uint32_t FeaturesMask = 0;
for (const StringRef &FeatureStr : FeatureStrs) {
unsigned Feature =
StringSwitch<unsigned>(FeatureStr)
#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
#include "llvm/Support/X86TargetParser.def"
;
FeaturesMask |= (1U << Feature);
}
return FeaturesMask;
}
Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
}
llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint32_t FeaturesMask) {
// Matching the struct layout from the compiler-rt/libgcc structure that is
// filled in:
// unsigned int __cpu_vendor;
// unsigned int __cpu_type;
// unsigned int __cpu_subtype;
// unsigned int __cpu_features[1];
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
llvm::ArrayType::get(Int32Ty, 1));
// Grab the global __cpu_model.
llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
// Grab the first (0th) element of the __cpu_features field from the global
// of struct type STy.
Value *Idxs[] = {ConstantInt::get(Int32Ty, 0), ConstantInt::get(Int32Ty, 3),
ConstantInt::get(Int32Ty, 0)};
Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
Value *Features =
Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
// Check that all of the bits corresponding to the requested features are set.
- Value *Bitset = Builder.CreateAnd(
- Features, llvm::ConstantInt::get(Int32Ty, FeaturesMask));
- return Builder.CreateICmpNE(Bitset, llvm::ConstantInt::get(Int32Ty, 0));
+ Value *Mask = Builder.getInt32(FeaturesMask);
+ Value *Bitset = Builder.CreateAnd(Features, Mask);
+ return Builder.CreateICmpEQ(Bitset, Mask);
}
Value *CodeGenFunction::EmitX86CpuInit() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
/*Variadic*/ false);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
return Builder.CreateCall(Func);
}
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == X86::BI__builtin_cpu_is)
return EmitX86CpuIs(E);
if (BuiltinID == X86::BI__builtin_cpu_supports)
return EmitX86CpuSupports(E);
if (BuiltinID == X86::BI__builtin_cpu_init)
return EmitX86CpuInit();
SmallVector<Value*, 4> Ops;
// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
// If this is a normal argument, just emit it as a scalar.
if ((ICEArguments & (1 << i)) == 0) {
Ops.push_back(EmitScalarExpr(E->getArg(i)));
continue;
}
// If this is required to be a constant, constant fold it so that we know
// that the generated intrinsic gets a ConstantInt.
llvm::APSInt Result;
bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
}
// These exist so that the builtin that takes an immediate can be bounds
// checked by clang to avoid passing bad immediates to the backend. Since
// AVX has a larger immediate than SSE, we would need separate builtins to
// do the different bounds checking. Rather than create a clang-specific,
// SSE-only builtin, this implements eight separate builtins to match the
// gcc implementation.
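// For instance, the scalar compare builtins (__builtin_ia32_cmpeqss and
// friends, handled further down in this switch) are expected to funnel
// through this helper with the matching immediate, e.g. x86.sse.cmp.ss with
// Imm == 0 for the "equal" form.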
auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops);
};
// For the vector forms of FP comparisons, translate the builtins directly to
// IR.
// TODO: The builtins could be removed if the SSE header files used vector
// extension comparisons directly (vector ordered/unordered may need
// additional support via __builtin_isnan()).
auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) {
Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
return Builder.CreateBitCast(Sext, FPVecTy);
};
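// e.g. given a <2 x double> input and FCMP_OEQ, this lambda emits 'fcmp oeq',
// sign-extends the <2 x i1> result to <2 x i64> (all-ones or zero per lane),
// and bitcasts back to the floating-point vector type.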
switch (BuiltinID) {
default: return nullptr;
case X86::BI_mm_prefetch: {
Value *Address = Ops[0];
ConstantInt *C = cast<ConstantInt>(Ops[1]);
Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
Value *Data = ConstantInt::get(Int32Ty, 1);
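// Decoded from the classic hint encoding: e.g. _MM_HINT_T0 (3) yields RW=0,
// Locality=3, while _MM_HINT_ET0 (7) sets RW=1. Data=1 marks a data-cache
// prefetch.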
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
case X86::BI_mm_clflush: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
Ops[0]);
}
case X86::BI_mm_lfence: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
}
case X86::BI_mm_mfence: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
}
case X86::BI_mm_sfence: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
}
case X86::BI_mm_pause: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
}
case X86::BI__rdtsc: {
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
}
case X86::BI__builtin_ia32_undef128:
case X86::BI__builtin_ia32_undef256:
case X86::BI__builtin_ia32_undef512:
// The x86 definition of "undef" is not the same as the LLVM definition
// (PR32176). We leave optimizing away an unnecessary zero constant to the
// IR optimizer and backend.
// TODO: If we had a "freeze" IR instruction to generate a fixed undef
// value, we should use that here instead of a zero.
return llvm::Constant::getNullValue(ConvertType(E->getType()));
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
case X86::BI__builtin_ia32_vec_init_v2si:
return Builder.CreateBitCast(BuildVector(Ops),
llvm::Type::getX86_MMXTy(getLLVMContext()));
case X86::BI__builtin_ia32_vec_ext_v2si:
case X86::BI__builtin_ia32_vec_ext_v16qi:
case X86::BI__builtin_ia32_vec_ext_v8hi:
case X86::BI__builtin_ia32_vec_ext_v4si:
case X86::BI__builtin_ia32_vec_ext_v4sf:
case X86::BI__builtin_ia32_vec_ext_v2di:
case X86::BI__builtin_ia32_vec_ext_v32qi:
case X86::BI__builtin_ia32_vec_ext_v16hi:
case X86::BI__builtin_ia32_vec_ext_v8si:
case X86::BI__builtin_ia32_vec_ext_v4di: {
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
// Otherwise we could just do this in the header file.
return Builder.CreateExtractElement(Ops[0], Index);
}
case X86::BI__builtin_ia32_vec_set_v16qi:
case X86::BI__builtin_ia32_vec_set_v8hi:
case X86::BI__builtin_ia32_vec_set_v4si:
case X86::BI__builtin_ia32_vec_set_v2di:
case X86::BI__builtin_ia32_vec_set_v32qi:
case X86::BI__builtin_ia32_vec_set_v16hi:
case X86::BI__builtin_ia32_vec_set_v8si:
case X86::BI__builtin_ia32_vec_set_v4di: {
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
Index &= NumElts - 1;
// These builtins exist so we can ensure the index is an ICE and in range.
// Otherwise we could just do this in the header file.
return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
}
case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
}
case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
case X86::BI__builtin_ia32_xsave:
case X86::BI__builtin_ia32_xsave64:
case X86::BI__builtin_ia32_xrstor:
case X86::BI__builtin_ia32_xrstor64:
case X86::BI__builtin_ia32_xsaveopt:
case X86::BI__builtin_ia32_xsaveopt64:
case X86::BI__builtin_ia32_xrstors:
case X86::BI__builtin_ia32_xrstors64:
case X86::BI__builtin_ia32_xsavec:
case X86::BI__builtin_ia32_xsavec64:
case X86::BI__builtin_ia32_xsaves:
case X86::BI__builtin_ia32_xsaves64: {
Intrinsic::ID ID;
#define INTRINSIC_X86_XSAVE_ID(NAME) \
case X86::BI__builtin_ia32_##NAME: \
ID = Intrinsic::x86_##NAME; \
break
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
INTRINSIC_X86_XSAVE_ID(xsave);
INTRINSIC_X86_XSAVE_ID(xsave64);
INTRINSIC_X86_XSAVE_ID(xrstor);
INTRINSIC_X86_XSAVE_ID(xrstor64);
INTRINSIC_X86_XSAVE_ID(xsaveopt);
INTRINSIC_X86_XSAVE_ID(xsaveopt64);
INTRINSIC_X86_XSAVE_ID(xrstors);
INTRINSIC_X86_XSAVE_ID(xrstors64);
INTRINSIC_X86_XSAVE_ID(xsavec);
INTRINSIC_X86_XSAVE_ID(xsavec64);
INTRINSIC_X86_XSAVE_ID(xsaves);
INTRINSIC_X86_XSAVE_ID(xsaves64);
}
#undef INTRINSIC_X86_XSAVE_ID
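// The 64-bit xsave mask is split into two i32 halves, high then low,
// presumably to match the EDX:EAX pair consumed by the underlying
// instruction.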
Value *Mhi = Builder.CreateTrunc(
Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
Ops[1] = Mhi;
Ops.push_back(Mlo);
return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
}
case X86::BI__builtin_ia32_storedqudi128_mask:
case X86::BI__builtin_ia32_storedqusi128_mask:
case X86::BI__builtin_ia32_storedquhi128_mask:
case X86::BI__builtin_ia32_storedquqi128_mask:
case X86::BI__builtin_ia32_storeupd128_mask:
case X86::BI__builtin_ia32_storeups128_mask:
case X86::BI__builtin_ia32_storedqudi256_mask:
case X86::BI__builtin_ia32_storedqusi256_mask:
case X86::BI__builtin_ia32_storedquhi256_mask:
case X86::BI__builtin_ia32_storedquqi256_mask:
case X86::BI__builtin_ia32_storeupd256_mask:
case X86::BI__builtin_ia32_storeups256_mask:
case X86::BI__builtin_ia32_storedqudi512_mask:
case X86::BI__builtin_ia32_storedqusi512_mask:
case X86::BI__builtin_ia32_storedquhi512_mask:
case X86::BI__builtin_ia32_storedquqi512_mask:
case X86::BI__builtin_ia32_storeupd512_mask:
case X86::BI__builtin_ia32_storeups512_mask:
return EmitX86MaskedStore(*this, Ops, 1);
case X86::BI__builtin_ia32_storess128_mask:
case X86::BI__builtin_ia32_storesd128_mask: {
return EmitX86MaskedStore(*this, Ops, 1);
}
case X86::BI__builtin_ia32_vpopcntb_128:
case X86::BI__builtin_ia32_vpopcntd_128:
case X86::BI__builtin_ia32_vpopcntq_128:
case X86::BI__builtin_ia32_vpopcntw_128:
case X86::BI__builtin_ia32_vpopcntb_256:
case X86::BI__builtin_ia32_vpopcntd_256:
case X86::BI__builtin_ia32_vpopcntq_256:
case X86::BI__builtin_ia32_vpopcntw_256:
case X86::BI__builtin_ia32_vpopcntb_512:
case X86::BI__builtin_ia32_vpopcntd_512:
case X86::BI__builtin_ia32_vpopcntq_512:
case X86::BI__builtin_ia32_vpopcntw_512: {
llvm::Type *ResultType = ConvertType(E->getType());
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, Ops);
}
case X86::BI__builtin_ia32_cvtmask2b128:
case X86::BI__builtin_ia32_cvtmask2b256:
case X86::BI__builtin_ia32_cvtmask2b512:
case X86::BI__builtin_ia32_cvtmask2w128:
case X86::BI__builtin_ia32_cvtmask2w256:
case X86::BI__builtin_ia32_cvtmask2w512:
case X86::BI__builtin_ia32_cvtmask2d128:
case X86::BI__builtin_ia32_cvtmask2d256:
case X86::BI__builtin_ia32_cvtmask2d512:
case X86::BI__builtin_ia32_cvtmask2q128:
case X86::BI__builtin_ia32_cvtmask2q256:
case X86::BI__builtin_ia32_cvtmask2q512:
return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
case X86::BI__builtin_ia32_cvtb2mask128:
case X86::BI__builtin_ia32_cvtb2mask256:
case X86::BI__builtin_ia32_cvtb2mask512:
case X86::BI__builtin_ia32_cvtw2mask128:
case X86::BI__builtin_ia32_cvtw2mask256:
case X86::BI__builtin_ia32_cvtw2mask512:
case X86::BI__builtin_ia32_cvtd2mask128:
case X86::BI__builtin_ia32_cvtd2mask256:
case X86::BI__builtin_ia32_cvtd2mask512:
case X86::BI__builtin_ia32_cvtq2mask128:
case X86::BI__builtin_ia32_cvtq2mask256:
case X86::BI__builtin_ia32_cvtq2mask512:
return EmitX86ConvertToMask(*this, Ops[0]);
case X86::BI__builtin_ia32_vfmaddss3:
case X86::BI__builtin_ia32_vfmaddsd3:
case X86::BI__builtin_ia32_vfmaddss3_mask:
case X86::BI__builtin_ia32_vfmaddsd3_mask:
return EmitScalarFMAExpr(*this, Ops, Ops[0]);
case X86::BI__builtin_ia32_vfmaddss:
case X86::BI__builtin_ia32_vfmaddsd:
return EmitScalarFMAExpr(*this, Ops,
Constant::getNullValue(Ops[0]->getType()));
case X86::BI__builtin_ia32_vfmaddss3_maskz:
case X86::BI__builtin_ia32_vfmaddsd3_maskz:
return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
case X86::BI__builtin_ia32_vfmaddss3_mask3:
case X86::BI__builtin_ia32_vfmaddsd3_mask3:
return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
case X86::BI__builtin_ia32_vfmsubss3_mask3:
case X86::BI__builtin_ia32_vfmsubsd3_mask3:
return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
/*NegAcc*/true);
case X86::BI__builtin_ia32_vfmaddps:
case X86::BI__builtin_ia32_vfmaddpd:
case X86::BI__builtin_ia32_vfmaddps256:
case X86::BI__builtin_ia32_vfmaddpd256:
case X86::BI__builtin_ia32_vfmaddps512_mask:
case X86::BI__builtin_ia32_vfmaddps512_maskz:
case X86::BI__builtin_ia32_vfmaddps512_mask3:
case X86::BI__builtin_ia32_vfmsubps512_mask3:
case X86::BI__builtin_ia32_vfmaddpd512_mask:
case X86::BI__builtin_ia32_vfmaddpd512_maskz:
case X86::BI__builtin_ia32_vfmaddpd512_mask3:
case X86::BI__builtin_ia32_vfmsubpd512_mask3:
return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
case X86::BI__builtin_ia32_vfmaddsubps:
case X86::BI__builtin_ia32_vfmaddsubpd:
case X86::BI__builtin_ia32_vfmaddsubps256:
case X86::BI__builtin_ia32_vfmaddsubpd256:
case X86::BI__builtin_ia32_vfmaddsubps512_mask:
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
case X86::BI__builtin_ia32_storeaps128_mask:
case X86::BI__builtin_ia32_storeapd128_mask:
case X86::BI__builtin_ia32_movdqa32store256_mask:
case X86::BI__builtin_ia32_movdqa64store256_mask:
case X86::BI__builtin_ia32_storeaps256_mask:
case X86::BI__builtin_ia32_storeapd256_mask:
case X86::BI__builtin_ia32_movdqa32store512_mask:
case X86::BI__builtin_ia32_movdqa64store512_mask:
case X86::BI__builtin_ia32_storeaps512_mask:
case X86::BI__builtin_ia32_storeapd512_mask: {
unsigned Align =
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
return EmitX86MaskedStore(*this, Ops, Align);
}
case X86::BI__builtin_ia32_loadups128_mask:
case X86::BI__builtin_ia32_loadups256_mask:
case X86::BI__builtin_ia32_loadups512_mask:
case X86::BI__builtin_ia32_loadupd128_mask:
case X86::BI__builtin_ia32_loadupd256_mask:
case X86::BI__builtin_ia32_loadupd512_mask:
case X86::BI__builtin_ia32_loaddquqi128_mask:
case X86::BI__builtin_ia32_loaddquqi256_mask:
case X86::BI__builtin_ia32_loaddquqi512_mask:
case X86::BI__builtin_ia32_loaddquhi128_mask:
case X86::BI__builtin_ia32_loaddquhi256_mask:
case X86::BI__builtin_ia32_loaddquhi512_mask:
case X86::BI__builtin_ia32_loaddqusi128_mask:
case X86::BI__builtin_ia32_loaddqusi256_mask:
case X86::BI__builtin_ia32_loaddqusi512_mask:
case X86::BI__builtin_ia32_loaddqudi128_mask:
case X86::BI__builtin_ia32_loaddqudi256_mask:
case X86::BI__builtin_ia32_loaddqudi512_mask:
return EmitX86MaskedLoad(*this, Ops, 1);
case X86::BI__builtin_ia32_loadss128_mask:
case X86::BI__builtin_ia32_loadsd128_mask:
return EmitX86MaskedLoad(*this, Ops, 1);
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
case X86::BI__builtin_ia32_loadaps512_mask:
case X86::BI__builtin_ia32_loadapd128_mask:
case X86::BI__builtin_ia32_loadapd256_mask:
case X86::BI__builtin_ia32_loadapd512_mask:
case X86::BI__builtin_ia32_movdqa32load128_mask:
case X86::BI__builtin_ia32_movdqa32load256_mask:
case X86::BI__builtin_ia32_movdqa32load512_mask:
case X86::BI__builtin_ia32_movdqa64load128_mask:
case X86::BI__builtin_ia32_movdqa64load256_mask:
case X86::BI__builtin_ia32_movdqa64load512_mask: {
unsigned Align =
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
return EmitX86MaskedLoad(*this, Ops, Align);
}
case X86::BI__builtin_ia32_expandloaddf128_mask:
case X86::BI__builtin_ia32_expandloaddf256_mask:
case X86::BI__builtin_ia32_expandloaddf512_mask:
case X86::BI__builtin_ia32_expandloadsf128_mask:
case X86::BI__builtin_ia32_expandloadsf256_mask:
case X86::BI__builtin_ia32_expandloadsf512_mask:
case X86::BI__builtin_ia32_expandloaddi128_mask:
case X86::BI__builtin_ia32_expandloaddi256_mask:
case X86::BI__builtin_ia32_expandloaddi512_mask:
case X86::BI__builtin_ia32_expandloadsi128_mask:
case X86::BI__builtin_ia32_expandloadsi256_mask:
case X86::BI__builtin_ia32_expandloadsi512_mask:
case X86::BI__builtin_ia32_expandloadhi128_mask:
case X86::BI__builtin_ia32_expandloadhi256_mask:
case X86::BI__builtin_ia32_expandloadhi512_mask:
case X86::BI__builtin_ia32_expandloadqi128_mask:
case X86::BI__builtin_ia32_expandloadqi256_mask:
case X86::BI__builtin_ia32_expandloadqi512_mask:
return EmitX86ExpandLoad(*this, Ops);
case X86::BI__builtin_ia32_compressstoredf128_mask:
case X86::BI__builtin_ia32_compressstoredf256_mask:
case X86::BI__builtin_ia32_compressstoredf512_mask:
case X86::BI__builtin_ia32_compressstoresf128_mask:
case X86::BI__builtin_ia32_compressstoresf256_mask:
case X86::BI__builtin_ia32_compressstoresf512_mask:
case X86::BI__builtin_ia32_compressstoredi128_mask:
case X86::BI__builtin_ia32_compressstoredi256_mask:
case X86::BI__builtin_ia32_compressstoredi512_mask:
case X86::BI__builtin_ia32_compressstoresi128_mask:
case X86::BI__builtin_ia32_compressstoresi256_mask:
case X86::BI__builtin_ia32_compressstoresi512_mask:
case X86::BI__builtin_ia32_compressstorehi128_mask:
case X86::BI__builtin_ia32_compressstorehi256_mask:
case X86::BI__builtin_ia32_compressstorehi512_mask:
case X86::BI__builtin_ia32_compressstoreqi128_mask:
case X86::BI__builtin_ia32_compressstoreqi256_mask:
case X86::BI__builtin_ia32_compressstoreqi512_mask:
return EmitX86CompressStore(*this, Ops);
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
// cast val to v2i64
Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
// extract element 0 (storelps) or element 1 (storehps)
unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
Ops[1] = Builder.CreateExtractElement(Ops[1], Index, "extract");
// cast the pointer to i64* and store
Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
}
case X86::BI__builtin_ia32_vextractf128_pd256:
case X86::BI__builtin_ia32_vextractf128_ps256:
case X86::BI__builtin_ia32_vextractf128_si256:
case X86::BI__builtin_ia32_extract128i256:
case X86::BI__builtin_ia32_extractf64x4_mask:
case X86::BI__builtin_ia32_extractf32x4_mask:
case X86::BI__builtin_ia32_extracti64x4_mask:
case X86::BI__builtin_ia32_extracti32x4_mask:
case X86::BI__builtin_ia32_extractf32x8_mask:
case X86::BI__builtin_ia32_extracti32x8_mask:
case X86::BI__builtin_ia32_extractf32x4_256_mask:
case X86::BI__builtin_ia32_extracti32x4_256_mask:
case X86::BI__builtin_ia32_extractf64x2_256_mask:
case X86::BI__builtin_ia32_extracti64x2_256_mask:
case X86::BI__builtin_ia32_extractf64x2_512_mask:
case X86::BI__builtin_ia32_extracti64x2_512_mask: {
llvm::Type *DstTy = ConvertType(E->getType());
unsigned NumElts = DstTy->getVectorNumElements();
unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
unsigned SubVectors = SrcNumElts / NumElts;
unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= NumElts;
uint32_t Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + Index;
Value *Res = Builder.CreateShuffleVector(Ops[0],
UndefValue::get(Ops[0]->getType()),
makeArrayRef(Indices, NumElts),
"extract");
if (Ops.size() == 4)
Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
return Res;
}
case X86::BI__builtin_ia32_vinsertf128_pd256:
case X86::BI__builtin_ia32_vinsertf128_ps256:
case X86::BI__builtin_ia32_vinsertf128_si256:
case X86::BI__builtin_ia32_insert128i256:
case X86::BI__builtin_ia32_insertf64x4:
case X86::BI__builtin_ia32_insertf32x4:
case X86::BI__builtin_ia32_inserti64x4:
case X86::BI__builtin_ia32_inserti32x4:
case X86::BI__builtin_ia32_insertf32x8:
case X86::BI__builtin_ia32_inserti32x8:
case X86::BI__builtin_ia32_insertf32x4_256:
case X86::BI__builtin_ia32_inserti32x4_256:
case X86::BI__builtin_ia32_insertf64x2_256:
case X86::BI__builtin_ia32_inserti64x2_256:
case X86::BI__builtin_ia32_insertf64x2_512:
case X86::BI__builtin_ia32_inserti64x2_512: {
unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
unsigned SubVectors = DstNumElts / SrcNumElts;
unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
Index &= SubVectors - 1; // Remove any extra bits.
Index *= SrcNumElts;
uint32_t Indices[16];
for (unsigned i = 0; i != DstNumElts; ++i)
Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
Value *Op1 = Builder.CreateShuffleVector(Ops[1],
UndefValue::get(Ops[1]->getType()),
makeArrayRef(Indices, DstNumElts),
"widen");
for (unsigned i = 0; i != DstNumElts; ++i) {
if (i >= Index && i < (Index + SrcNumElts))
Indices[i] = (i - Index) + DstNumElts;
else
Indices[i] = i;
}
return Builder.CreateShuffleVector(Ops[0], Op1,
makeArrayRef(Indices, DstNumElts),
"insert");
}
case X86::BI__builtin_ia32_pmovqd512_mask:
case X86::BI__builtin_ia32_pmovwb512_mask: {
Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
return EmitX86Select(*this, Ops[2], Res, Ops[1]);
}
case X86::BI__builtin_ia32_pmovdb512_mask:
case X86::BI__builtin_ia32_pmovdw512_mask:
case X86::BI__builtin_ia32_pmovqw512_mask: {
if (const auto *C = dyn_cast<Constant>(Ops[2]))
if (C->isAllOnesValue())
return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
Intrinsic::ID IID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_pmovdb512_mask:
IID = Intrinsic::x86_avx512_mask_pmov_db_512;
break;
case X86::BI__builtin_ia32_pmovdw512_mask:
IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
break;
case X86::BI__builtin_ia32_pmovqw512_mask:
IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
break;
}
Function *Intr = CGM.getIntrinsic(IID);
return Builder.CreateCall(Intr, Ops);
}
case X86::BI__builtin_ia32_pblendw128:
case X86::BI__builtin_ia32_blendpd:
case X86::BI__builtin_ia32_blendps:
case X86::BI__builtin_ia32_blendpd256:
case X86::BI__builtin_ia32_blendps256:
case X86::BI__builtin_ia32_pblendw256:
case X86::BI__builtin_ia32_pblendd128:
case X86::BI__builtin_ia32_pblendd256: {
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
uint32_t Indices[16];
// If there are more than 8 elements, the immediate is used twice, so make
// sure we handle that.
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
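// e.g. pblendw128 on <8 x i16> with Imm == 0xF0 yields
// Indices = {0, 1, 2, 3, 12, 13, 14, 15}: the low half from Ops[0], the high
// half from Ops[1].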
return Builder.CreateShuffleVector(Ops[0], Ops[1],
makeArrayRef(Indices, NumElts),
"blend");
}
case X86::BI__builtin_ia32_pshuflw:
case X86::BI__builtin_ia32_pshuflw256:
case X86::BI__builtin_ia32_pshuflw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
llvm::Type *Ty = Ops[0]->getType();
unsigned NumElts = Ty->getVectorNumElements();
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
uint32_t Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i) {
Indices[l + i] = l + (Imm & 3);
Imm >>= 2;
}
for (unsigned i = 4; i != 8; ++i)
Indices[l + i] = l + i;
}
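// e.g. Imm == 0x1B (_MM_SHUFFLE(0, 1, 2, 3)) reverses the four low words of
// each 128-bit lane while the high words pass through unchanged.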
return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
makeArrayRef(Indices, NumElts),
"pshuflw");
}
case X86::BI__builtin_ia32_pshufhw:
case X86::BI__builtin_ia32_pshufhw256:
case X86::BI__builtin_ia32_pshufhw512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
llvm::Type *Ty = Ops[0]->getType();
unsigned NumElts = Ty->getVectorNumElements();
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
uint32_t Indices[32];
for (unsigned l = 0; l != NumElts; l += 8) {
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + i;
for (unsigned i = 4; i != 8; ++i) {
Indices[l + i] = l + 4 + (Imm & 3);
Imm >>= 2;
}
}
return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
makeArrayRef(Indices, NumElts),
"pshufhw");
}
case X86::BI__builtin_ia32_pshufd:
case X86::BI__builtin_ia32_pshufd256:
case X86::BI__builtin_ia32_pshufd512:
case X86::BI__builtin_ia32_vpermilpd:
case X86::BI__builtin_ia32_vpermilps:
case X86::BI__builtin_ia32_vpermilpd256:
case X86::BI__builtin_ia32_vpermilps256:
case X86::BI__builtin_ia32_vpermilpd512:
case X86::BI__builtin_ia32_vpermilps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
llvm::Type *Ty = Ops[0]->getType();
unsigned NumElts = Ty->getVectorNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
uint32_t Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
Indices[i + l] = (Imm % NumLaneElts) + l;
Imm /= NumLaneElts;
}
}
return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
makeArrayRef(Indices, NumElts),
"permil");
}
case X86::BI__builtin_ia32_shufpd:
case X86::BI__builtin_ia32_shufpd256:
case X86::BI__builtin_ia32_shufpd512:
case X86::BI__builtin_ia32_shufps:
case X86::BI__builtin_ia32_shufps256:
case X86::BI__builtin_ia32_shufps512: {
uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
llvm::Type *Ty = Ops[0]->getType();
unsigned NumElts = Ty->getVectorNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
unsigned NumLaneElts = NumElts / NumLanes;
// Splat the 8 bits of the immediate 4 times to help the loop wrap around.
Imm = (Imm & 0xff) * 0x01010101;
uint32_t Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
for (unsigned i = 0; i != NumLaneElts; ++i) {
unsigned Index = Imm % NumLaneElts;
Imm /= NumLaneElts;
if (i >= (NumLaneElts / 2))
Index += NumElts;
Indices[l + i] = l + Index;
}
}
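// e.g. _mm_shuffle_ps(a, b, 0x4E) produces Indices = {2, 3, 4, 5}, i.e.
// {a[2], a[3], b[0], b[1]}.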
return Builder.CreateShuffleVector(Ops[0], Ops[1],
makeArrayRef(Indices, NumElts),
"shufp");
}
case X86::BI__builtin_ia32_permdi256:
case X86::BI__builtin_ia32_permdf256:
case X86::BI__builtin_ia32_permdi512:
case X86::BI__builtin_ia32_permdf512: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
llvm::Type *Ty = Ops[0]->getType();
unsigned NumElts = Ty->getVectorNumElements();
// These intrinsics operate on 256-bit lanes of four 64-bit elements.
uint32_t Indices[8];
for (unsigned l = 0; l != NumElts; l += 4)
for (unsigned i = 0; i != 4; ++i)
Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
makeArrayRef(Indices, NumElts),
"perm");
}
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256:
case X86::BI__builtin_ia32_palignr512: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
assert(NumElts % 16 == 0);
// If palignr is shifting the pair of vectors more than the size of two
// lanes, emit zero.
if (ShiftVal >= 32)
return llvm::Constant::getNullValue(ConvertType(E->getType()));
// If palignr is shifting the pair of input vectors more than one lane,
// but less than two lanes, convert to shifting in zeroes.
if (ShiftVal > 16) {
ShiftVal -= 16;
Ops[1] = Ops[0];
Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
}
uint32_t Indices[64];
// 256-bit palignr operates on 128-bit lanes, so we need to handle that here.
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = ShiftVal + i;
if (Idx >= 16)
Idx += NumElts - 16; // End of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
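// e.g. a 128-bit palignr with ShiftVal == 4 yields Indices = {4..15, 16..19}:
// bytes 4..15 of Ops[1] followed by bytes 0..3 of Ops[0].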
return Builder.CreateShuffleVector(Ops[1], Ops[0],
makeArrayRef(Indices, NumElts),
"palignr");
}
case X86::BI__builtin_ia32_alignd128:
case X86::BI__builtin_ia32_alignd256:
case X86::BI__builtin_ia32_alignd512:
case X86::BI__builtin_ia32_alignq128:
case X86::BI__builtin_ia32_alignq256:
case X86::BI__builtin_ia32_alignq512: {
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
// Mask the shift amount to the width of two vectors.
ShiftVal &= (2 * NumElts) - 1;
uint32_t Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i + ShiftVal;
return Builder.CreateShuffleVector(Ops[1], Ops[0],
makeArrayRef(Indices, NumElts),
"valign");
}
case X86::BI__builtin_ia32_shuf_f32x4_256:
case X86::BI__builtin_ia32_shuf_f64x2_256:
case X86::BI__builtin_ia32_shuf_i32x4_256:
case X86::BI__builtin_ia32_shuf_i64x2_256:
case X86::BI__builtin_ia32_shuf_f32x4:
case X86::BI__builtin_ia32_shuf_f64x2:
case X86::BI__builtin_ia32_shuf_i32x4:
case X86::BI__builtin_ia32_shuf_i64x2: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
llvm::Type *Ty = Ops[0]->getType();
unsigned NumElts = Ty->getVectorNumElements();
unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
unsigned NumLaneElts = NumElts / NumLanes;
uint32_t Indices[16];
for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
unsigned Index = (Imm % NumLanes) * NumLaneElts;
Imm /= NumLanes; // Discard the bits we just used.
if (l >= (NumElts / 2))
Index += NumElts; // Switch to other source.
for (unsigned i = 0; i != NumLaneElts; ++i) {
Indices[l + i] = Index + i;
}
}
return Builder.CreateShuffleVector(Ops[0], Ops[1],
makeArrayRef(Indices, NumElts),
"shuf");
}
case X86::BI__builtin_ia32_vperm2f128_pd256:
case X86::BI__builtin_ia32_vperm2f128_ps256:
case X86::BI__builtin_ia32_vperm2f128_si256:
case X86::BI__builtin_ia32_permti256: {
unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
// This takes a very simple approach since there are two lanes and a
// shuffle can have 2 inputs. So we reserve the first input for the first
// lane and the second input for the second lane. This may result in
// duplicate sources, but this can be dealt with in the backend.
Value *OutOps[2];
uint32_t Indices[8];
for (unsigned l = 0; l != 2; ++l) {
// Determine the source for this lane.
if (Imm & (1 << ((l * 4) + 3)))
OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
else if (Imm & (1 << ((l * 4) + 1)))
OutOps[l] = Ops[1];
else
OutOps[l] = Ops[0];
for (unsigned i = 0; i != NumElts/2; ++i) {
// Start with the ith element of the source for this lane.
unsigned Idx = (l * NumElts) + i;
// If bit 0 of the immediate half is set, switch to the high half of
// the source.
if (Imm & (1 << (l * 4)))
Idx += NumElts/2;
Indices[(l * (NumElts/2)) + i] = Idx;
}
}
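// e.g. Imm == 0x21 selects the high half of Ops[0] for lane 0 and the low
// half of Ops[1] for lane 1 (the classic cross-lane swap pattern).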
return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
makeArrayRef(Indices, NumElts),
"vperm");
}
case X86::BI__builtin_ia32_pslldqi128_byteshift:
case X86::BI__builtin_ia32_pslldqi256_byteshift:
case X86::BI__builtin_ia32_pslldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
llvm::Type *ResultType = Ops[0]->getType();
// The builtin type is vXi64, so multiply by 8 to get the number of bytes.
unsigned NumElts = ResultType->getVectorNumElements() * 8;
// If pslldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
uint32_t Indices[64];
// 256/512-bit pslldq operates on 128-bit lanes, so we need to handle that
// here.
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = NumElts + i - ShiftVal;
if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
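// e.g. a 128-bit pslldq with ShiftVal == 4 yields Indices = {12..15, 16..27}:
// four zeros from the null vector followed by the low 12 source bytes.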
llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Zero, Cast,
makeArrayRef(Indices, NumElts),
"pslldq");
return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
}
case X86::BI__builtin_ia32_psrldqi128_byteshift:
case X86::BI__builtin_ia32_psrldqi256_byteshift:
case X86::BI__builtin_ia32_psrldqi512_byteshift: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
llvm::Type *ResultType = Ops[0]->getType();
// The builtin type is vXi64, so multiply by 8 to get the number of bytes.
unsigned NumElts = ResultType->getVectorNumElements() * 8;
// If psrldq is shifting the vector more than 15 bytes, emit zero.
if (ShiftVal >= 16)
return llvm::Constant::getNullValue(ResultType);
uint32_t Indices[64];
// 256/512-bit psrldq operates on 128-bit lanes, so we need to handle that
// here.
for (unsigned l = 0; l != NumElts; l += 16) {
for (unsigned i = 0; i != 16; ++i) {
unsigned Idx = i + ShiftVal;
if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
Indices[l + i] = Idx + l;
}
}
llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
Value *Zero = llvm::Constant::getNullValue(VecTy);
Value *SV = Builder.CreateShuffleVector(Cast, Zero,
makeArrayRef(Indices, NumElts),
"psrldq");
return Builder.CreateBitCast(SV, ResultType, "cast");
}
case X86::BI__builtin_ia32_movnti:
case X86::BI__builtin_ia32_movnti64:
case X86::BI__builtin_ia32_movntsd:
case X86::BI__builtin_ia32_movntss: {
llvm::MDNode *Node = llvm::MDNode::get(
getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
Value *Ptr = Ops[0];
Value *Src = Ops[1];
// Extract the 0th element of the source vector.
if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
BuiltinID == X86::BI__builtin_ia32_movntss)
Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
// Convert the type of the pointer to a pointer to the stored type.
Value *BC = Builder.CreateBitCast(
Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
// Unaligned nontemporal store of the scalar value.
StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
SI->setAlignment(1);
return SI;
}
case X86::BI__builtin_ia32_selectb_128:
case X86::BI__builtin_ia32_selectb_256:
case X86::BI__builtin_ia32_selectb_512:
case X86::BI__builtin_ia32_selectw_128:
case X86::BI__builtin_ia32_selectw_256:
case X86::BI__builtin_ia32_selectw_512:
case X86::BI__builtin_ia32_selectd_128:
case X86::BI__builtin_ia32_selectd_256:
case X86::BI__builtin_ia32_selectd_512:
case X86::BI__builtin_ia32_selectq_128:
case X86::BI__builtin_ia32_selectq_256:
case X86::BI__builtin_ia32_selectq_512:
case X86::BI__builtin_ia32_selectps_128:
case X86::BI__builtin_ia32_selectps_256:
case X86::BI__builtin_ia32_selectps_512:
case X86::BI__builtin_ia32_selectpd_128:
case X86::BI__builtin_ia32_selectpd_256:
case X86::BI__builtin_ia32_selectpd_512:
return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
case X86::BI__builtin_ia32_selectss_128:
case X86::BI__builtin_ia32_selectsd_128: {
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
A = EmitX86ScalarSelect(*this, Ops[0], A, B);
return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_cmpb128_mask:
case X86::BI__builtin_ia32_cmpb256_mask:
case X86::BI__builtin_ia32_cmpb512_mask:
case X86::BI__builtin_ia32_cmpw128_mask:
case X86::BI__builtin_ia32_cmpw256_mask:
case X86::BI__builtin_ia32_cmpw512_mask:
case X86::BI__builtin_ia32_cmpd128_mask:
case X86::BI__builtin_ia32_cmpd256_mask:
case X86::BI__builtin_ia32_cmpd512_mask:
case X86::BI__builtin_ia32_cmpq128_mask:
case X86::BI__builtin_ia32_cmpq256_mask:
case X86::BI__builtin_ia32_cmpq512_mask: {
unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
return EmitX86MaskedCompare(*this, CC, true, Ops);
}
case X86::BI__builtin_ia32_ucmpb128_mask:
case X86::BI__builtin_ia32_ucmpb256_mask:
case X86::BI__builtin_ia32_ucmpb512_mask:
case X86::BI__builtin_ia32_ucmpw128_mask:
case X86::BI__builtin_ia32_ucmpw256_mask:
case X86::BI__builtin_ia32_ucmpw512_mask:
case X86::BI__builtin_ia32_ucmpd128_mask:
case X86::BI__builtin_ia32_ucmpd256_mask:
case X86::BI__builtin_ia32_ucmpd512_mask:
case X86::BI__builtin_ia32_ucmpq128_mask:
case X86::BI__builtin_ia32_ucmpq256_mask:
case X86::BI__builtin_ia32_ucmpq512_mask: {
unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
return EmitX86MaskedCompare(*this, CC, false, Ops);
}
case X86::BI__builtin_ia32_kortestchi:
case X86::BI__builtin_ia32_kortestzhi: {
Value *Or = EmitX86MaskLogic(*this, Instruction::Or, 16, Ops);
Value *C;
if (BuiltinID == X86::BI__builtin_ia32_kortestchi)
C = llvm::Constant::getAllOnesValue(Builder.getInt16Ty());
else
C = llvm::Constant::getNullValue(Builder.getInt16Ty());
Value *Cmp = Builder.CreateICmpEQ(Or, C);
return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
}
case X86::BI__builtin_ia32_kandhi:
return EmitX86MaskLogic(*this, Instruction::And, 16, Ops);
case X86::BI__builtin_ia32_kandnhi:
return EmitX86MaskLogic(*this, Instruction::And, 16, Ops, true);
case X86::BI__builtin_ia32_korhi:
return EmitX86MaskLogic(*this, Instruction::Or, 16, Ops);
case X86::BI__builtin_ia32_kxnorhi:
return EmitX86MaskLogic(*this, Instruction::Xor, 16, Ops, true);
case X86::BI__builtin_ia32_kxorhi:
return EmitX86MaskLogic(*this, Instruction::Xor, 16, Ops);
case X86::BI__builtin_ia32_knothi: {
Ops[0] = getMaskVecValue(*this, Ops[0], 16);
return Builder.CreateBitCast(Builder.CreateNot(Ops[0]),
Builder.getInt16Ty());
}
case X86::BI__builtin_ia32_kunpckdi:
case X86::BI__builtin_ia32_kunpcksi:
case X86::BI__builtin_ia32_kunpckhi: {
unsigned NumElts = Ops[0]->getType()->getScalarSizeInBits();
Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
uint32_t Indices[64];
for (unsigned i = 0; i != NumElts; ++i)
Indices[i] = i;
// First extract half of each vector. This gives better codegen than
// doing it in a single shuffle.
LHS = Builder.CreateShuffleVector(LHS, LHS,
makeArrayRef(Indices, NumElts / 2));
RHS = Builder.CreateShuffleVector(RHS, RHS,
makeArrayRef(Indices, NumElts / 2));
// Concat the vectors.
// NOTE: Operands are swapped to match the intrinsic definition.
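// E.g. for kunpckbw (the 16-bit case), this yields a result whose low
// byte comes from the second operand and whose high byte comes from the
// first, matching the instruction's documented semantics.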
Value *Res = Builder.CreateShuffleVector(RHS, LHS,
makeArrayRef(Indices, NumElts));
return Builder.CreateBitCast(Res, Ops[0]->getType());
}
case X86::BI__builtin_ia32_vplzcntd_128:
case X86::BI__builtin_ia32_vplzcntd_256:
case X86::BI__builtin_ia32_vplzcntd_512:
case X86::BI__builtin_ia32_vplzcntq_128:
case X86::BI__builtin_ia32_vplzcntq_256:
case X86::BI__builtin_ia32_vplzcntq_512: {
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
}
case X86::BI__builtin_ia32_sqrtss:
case X86::BI__builtin_ia32_sqrtsd: {
Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
A = Builder.CreateCall(F, {A});
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_sqrtsd_round_mask:
case X86::BI__builtin_ia32_sqrtss_round_mask: {
unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
// Lower to a plain sqrt only when the rounding mode is 4 (AKA
// CUR_DIRECTION); otherwise keep the intrinsic.
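// (A rounding mode of 4 corresponds to _MM_FROUND_CUR_DIRECTION in the
// x86 intrinsics headers.)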
if (CC != 4) {
Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
Intrinsic::x86_avx512_mask_sqrt_sd :
Intrinsic::x86_avx512_mask_sqrt_ss;
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
A = Builder.CreateCall(F, A);
Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
}
case X86::BI__builtin_ia32_sqrtpd256:
case X86::BI__builtin_ia32_sqrtpd:
case X86::BI__builtin_ia32_sqrtps256:
case X86::BI__builtin_ia32_sqrtps:
case X86::BI__builtin_ia32_sqrtps512:
case X86::BI__builtin_ia32_sqrtpd512: {
if (Ops.size() == 2) {
unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
// Lower to a plain sqrt only when the rounding mode is 4 (AKA
// CUR_DIRECTION); otherwise keep the intrinsic.
if (CC != 4) {
Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
Intrinsic::x86_avx512_sqrt_ps_512 :
Intrinsic::x86_avx512_sqrt_pd_512;
return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
}
}
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
return Builder.CreateCall(F, Ops[0]);
}
case X86::BI__builtin_ia32_pabsb128:
case X86::BI__builtin_ia32_pabsw128:
case X86::BI__builtin_ia32_pabsd128:
case X86::BI__builtin_ia32_pabsb256:
case X86::BI__builtin_ia32_pabsw256:
case X86::BI__builtin_ia32_pabsd256:
case X86::BI__builtin_ia32_pabsq128:
case X86::BI__builtin_ia32_pabsq256:
case X86::BI__builtin_ia32_pabsb512:
case X86::BI__builtin_ia32_pabsw512:
case X86::BI__builtin_ia32_pabsd512:
case X86::BI__builtin_ia32_pabsq512:
return EmitX86Abs(*this, Ops);
case X86::BI__builtin_ia32_pmaxsb128:
case X86::BI__builtin_ia32_pmaxsw128:
case X86::BI__builtin_ia32_pmaxsd128:
case X86::BI__builtin_ia32_pmaxsq128:
case X86::BI__builtin_ia32_pmaxsb256:
case X86::BI__builtin_ia32_pmaxsw256:
case X86::BI__builtin_ia32_pmaxsd256:
case X86::BI__builtin_ia32_pmaxsq256:
case X86::BI__builtin_ia32_pmaxsb512:
case X86::BI__builtin_ia32_pmaxsw512:
case X86::BI__builtin_ia32_pmaxsd512:
case X86::BI__builtin_ia32_pmaxsq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
case X86::BI__builtin_ia32_pmaxub128:
case X86::BI__builtin_ia32_pmaxuw128:
case X86::BI__builtin_ia32_pmaxud128:
case X86::BI__builtin_ia32_pmaxuq128:
case X86::BI__builtin_ia32_pmaxub256:
case X86::BI__builtin_ia32_pmaxuw256:
case X86::BI__builtin_ia32_pmaxud256:
case X86::BI__builtin_ia32_pmaxuq256:
case X86::BI__builtin_ia32_pmaxub512:
case X86::BI__builtin_ia32_pmaxuw512:
case X86::BI__builtin_ia32_pmaxud512:
case X86::BI__builtin_ia32_pmaxuq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
case X86::BI__builtin_ia32_pminsb128:
case X86::BI__builtin_ia32_pminsw128:
case X86::BI__builtin_ia32_pminsd128:
case X86::BI__builtin_ia32_pminsq128:
case X86::BI__builtin_ia32_pminsb256:
case X86::BI__builtin_ia32_pminsw256:
case X86::BI__builtin_ia32_pminsd256:
case X86::BI__builtin_ia32_pminsq256:
case X86::BI__builtin_ia32_pminsb512:
case X86::BI__builtin_ia32_pminsw512:
case X86::BI__builtin_ia32_pminsd512:
case X86::BI__builtin_ia32_pminsq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
case X86::BI__builtin_ia32_pminub128:
case X86::BI__builtin_ia32_pminuw128:
case X86::BI__builtin_ia32_pminud128:
case X86::BI__builtin_ia32_pminuq128:
case X86::BI__builtin_ia32_pminub256:
case X86::BI__builtin_ia32_pminuw256:
case X86::BI__builtin_ia32_pminud256:
case X86::BI__builtin_ia32_pminuq256:
case X86::BI__builtin_ia32_pminub512:
case X86::BI__builtin_ia32_pminuw512:
case X86::BI__builtin_ia32_pminud512:
case X86::BI__builtin_ia32_pminuq512:
return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
case X86::BI__builtin_ia32_pmuludq128:
case X86::BI__builtin_ia32_pmuludq256:
case X86::BI__builtin_ia32_pmuludq512:
return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
case X86::BI__builtin_ia32_pmuldq128:
case X86::BI__builtin_ia32_pmuldq256:
case X86::BI__builtin_ia32_pmuldq512:
return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
case X86::BI__builtin_ia32_pternlogd512_mask:
case X86::BI__builtin_ia32_pternlogq512_mask:
case X86::BI__builtin_ia32_pternlogd128_mask:
case X86::BI__builtin_ia32_pternlogd256_mask:
case X86::BI__builtin_ia32_pternlogq128_mask:
case X86::BI__builtin_ia32_pternlogq256_mask:
return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
case X86::BI__builtin_ia32_pternlogd512_maskz:
case X86::BI__builtin_ia32_pternlogq512_maskz:
case X86::BI__builtin_ia32_pternlogd128_maskz:
case X86::BI__builtin_ia32_pternlogd256_maskz:
case X86::BI__builtin_ia32_pternlogq128_maskz:
case X86::BI__builtin_ia32_pternlogq256_maskz:
return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
return Builder.CreateCall(F, Ops, "pswapd");
}
case X86::BI__builtin_ia32_rdrand16_step:
case X86::BI__builtin_ia32_rdrand32_step:
case X86::BI__builtin_ia32_rdrand64_step:
case X86::BI__builtin_ia32_rdseed16_step:
case X86::BI__builtin_ia32_rdseed32_step:
case X86::BI__builtin_ia32_rdseed64_step: {
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_rdrand16_step:
ID = Intrinsic::x86_rdrand_16;
break;
case X86::BI__builtin_ia32_rdrand32_step:
ID = Intrinsic::x86_rdrand_32;
break;
case X86::BI__builtin_ia32_rdrand64_step:
ID = Intrinsic::x86_rdrand_64;
break;
case X86::BI__builtin_ia32_rdseed16_step:
ID = Intrinsic::x86_rdseed_16;
break;
case X86::BI__builtin_ia32_rdseed32_step:
ID = Intrinsic::x86_rdseed_32;
break;
case X86::BI__builtin_ia32_rdseed64_step:
ID = Intrinsic::x86_rdseed_64;
break;
}
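// The rdrand/rdseed intrinsics return a {value, success} pair: store the
// random value through the pointer argument and return the success flag.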
Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
case X86::BI__builtin_ia32_fpclassps128_mask:
case X86::BI__builtin_ia32_fpclassps256_mask:
case X86::BI__builtin_ia32_fpclassps512_mask:
case X86::BI__builtin_ia32_fpclasspd128_mask:
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclasspd512_mask: {
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
Value *MaskIn = Ops[2];
Ops.erase(&Ops[2]);
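// The mask is not passed to the fpclass intrinsic itself; it is applied
// to the comparison result afterwards via EmitX86MaskedCompareResult.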
Intrinsic::ID ID;
switch (BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_fpclassps128_mask:
ID = Intrinsic::x86_avx512_fpclass_ps_128;
break;
case X86::BI__builtin_ia32_fpclassps256_mask:
ID = Intrinsic::x86_avx512_fpclass_ps_256;
break;
case X86::BI__builtin_ia32_fpclassps512_mask:
ID = Intrinsic::x86_avx512_fpclass_ps_512;
break;
case X86::BI__builtin_ia32_fpclasspd128_mask:
ID = Intrinsic::x86_avx512_fpclass_pd_128;
break;
case X86::BI__builtin_ia32_fpclasspd256_mask:
ID = Intrinsic::x86_avx512_fpclass_pd_256;
break;
case X86::BI__builtin_ia32_fpclasspd512_mask:
ID = Intrinsic::x86_avx512_fpclass_pd_512;
break;
}
Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
}
// packed comparison intrinsics
case X86::BI__builtin_ia32_cmpeqps:
case X86::BI__builtin_ia32_cmpeqpd:
return getVectorFCmpIR(CmpInst::FCMP_OEQ);
case X86::BI__builtin_ia32_cmpltps:
case X86::BI__builtin_ia32_cmpltpd:
return getVectorFCmpIR(CmpInst::FCMP_OLT);
case X86::BI__builtin_ia32_cmpleps:
case X86::BI__builtin_ia32_cmplepd:
return getVectorFCmpIR(CmpInst::FCMP_OLE);
case X86::BI__builtin_ia32_cmpunordps:
case X86::BI__builtin_ia32_cmpunordpd:
return getVectorFCmpIR(CmpInst::FCMP_UNO);
case X86::BI__builtin_ia32_cmpneqps:
case X86::BI__builtin_ia32_cmpneqpd:
return getVectorFCmpIR(CmpInst::FCMP_UNE);
case X86::BI__builtin_ia32_cmpnltps:
case X86::BI__builtin_ia32_cmpnltpd:
return getVectorFCmpIR(CmpInst::FCMP_UGE);
case X86::BI__builtin_ia32_cmpnleps:
case X86::BI__builtin_ia32_cmpnlepd:
return getVectorFCmpIR(CmpInst::FCMP_UGT);
case X86::BI__builtin_ia32_cmpordps:
case X86::BI__builtin_ia32_cmpordpd:
return getVectorFCmpIR(CmpInst::FCMP_ORD);
case X86::BI__builtin_ia32_cmpps:
case X86::BI__builtin_ia32_cmpps256:
case X86::BI__builtin_ia32_cmppd:
case X86::BI__builtin_ia32_cmppd256:
case X86::BI__builtin_ia32_cmpps128_mask:
case X86::BI__builtin_ia32_cmpps256_mask:
case X86::BI__builtin_ia32_cmpps512_mask:
case X86::BI__builtin_ia32_cmppd128_mask:
case X86::BI__builtin_ia32_cmppd256_mask:
case X86::BI__builtin_ia32_cmppd512_mask: {
// Lower vector comparisons to fcmp instructions, while
// ignoring the requested signaling behaviour and
// ignoring the requested rounding mode.
// This is only possible as long as FENV_ACCESS is not implemented.
// See also: https://reviews.llvm.org/D45616
// The third argument is the comparison condition, an integer in the
// range [0, 31].
unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
// Lower to an IR fcmp instruction, ignoring the requested signaling
// behaviour; e.g. both _CMP_GT_OS and _CMP_GT_OQ are translated to
// FCMP_OGT.
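// The upper half of the table below (0x10 - 0x1f) mirrors the lower
// half; those encodings differ only in signaling behaviour, which is
// ignored here.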
FCmpInst::Predicate Pred;
switch (CC) {
case 0x00: Pred = FCmpInst::FCMP_OEQ; break;
case 0x01: Pred = FCmpInst::FCMP_OLT; break;
case 0x02: Pred = FCmpInst::FCMP_OLE; break;
case 0x03: Pred = FCmpInst::FCMP_UNO; break;
case 0x04: Pred = FCmpInst::FCMP_UNE; break;
case 0x05: Pred = FCmpInst::FCMP_UGE; break;
case 0x06: Pred = FCmpInst::FCMP_UGT; break;
case 0x07: Pred = FCmpInst::FCMP_ORD; break;
case 0x08: Pred = FCmpInst::FCMP_UEQ; break;
case 0x09: Pred = FCmpInst::FCMP_ULT; break;
case 0x0a: Pred = FCmpInst::FCMP_ULE; break;
case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
case 0x0c: Pred = FCmpInst::FCMP_ONE; break;
case 0x0d: Pred = FCmpInst::FCMP_OGE; break;
case 0x0e: Pred = FCmpInst::FCMP_OGT; break;
case 0x0f: Pred = FCmpInst::FCMP_TRUE; break;
case 0x10: Pred = FCmpInst::FCMP_OEQ; break;
case 0x11: Pred = FCmpInst::FCMP_OLT; break;
case 0x12: Pred = FCmpInst::FCMP_OLE; break;
case 0x13: Pred = FCmpInst::FCMP_UNO; break;
case 0x14: Pred = FCmpInst::FCMP_UNE; break;
case 0x15: Pred = FCmpInst::FCMP_UGE; break;
case 0x16: Pred = FCmpInst::FCMP_UGT; break;
case 0x17: Pred = FCmpInst::FCMP_ORD; break;
case 0x18: Pred = FCmpInst::FCMP_UEQ; break;
case 0x19: Pred = FCmpInst::FCMP_ULT; break;
case 0x1a: Pred = FCmpInst::FCMP_ULE; break;
case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
case 0x1c: Pred = FCmpInst::FCMP_ONE; break;
case 0x1d: Pred = FCmpInst::FCMP_OGE; break;
case 0x1e: Pred = FCmpInst::FCMP_OGT; break;
case 0x1f: Pred = FCmpInst::FCMP_TRUE; break;
default: llvm_unreachable("Unhandled CC");
}
// Builtins without the _mask suffix return a vector of integers
// of the same width as the input vectors.
switch (BuiltinID) {
case X86::BI__builtin_ia32_cmpps512_mask:
case X86::BI__builtin_ia32_cmppd512_mask:
case X86::BI__builtin_ia32_cmpps128_mask:
case X86::BI__builtin_ia32_cmpps256_mask:
case X86::BI__builtin_ia32_cmppd128_mask:
case X86::BI__builtin_ia32_cmppd256_mask: {
unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
}
default:
return getVectorFCmpIR(Pred);
}
}
// SSE scalar comparison intrinsics
case X86::BI__builtin_ia32_cmpeqss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
case X86::BI__builtin_ia32_cmpltss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
case X86::BI__builtin_ia32_cmpless:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
case X86::BI__builtin_ia32_cmpunordss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
case X86::BI__builtin_ia32_cmpneqss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
case X86::BI__builtin_ia32_cmpnltss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
case X86::BI__builtin_ia32_cmpnless:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
case X86::BI__builtin_ia32_cmpordss:
return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
case X86::BI__builtin_ia32_cmpeqsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
case X86::BI__builtin_ia32_cmpltsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
case X86::BI__builtin_ia32_cmplesd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
case X86::BI__builtin_ia32_cmpunordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
case X86::BI__builtin_ia32_cmpneqsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
case X86::BI__builtin_ia32_cmpnltsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
case X86::BI__builtin_ia32_cmpnlesd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
case X86::BI__builtin_ia32_cmpordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
case X86::BI__emul:
case X86::BI__emulu: {
llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
bool isSigned = (BuiltinID == X86::BI__emul);
Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
}
case X86::BI__mulh:
case X86::BI__umulh:
case X86::BI_mul128:
case X86::BI_umul128: {
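// __mulh/__umulh return the high 64 bits of the 128-bit product, while
// _mul128/_umul128 return the low 64 bits and store the high 64 bits
// through their third argument.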
llvm::Type *ResType = ConvertType(E->getType());
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
Value *MulResult, *HigherBits;
if (IsSigned) {
MulResult = Builder.CreateNSWMul(LHS, RHS);
HigherBits = Builder.CreateAShr(MulResult, 64);
} else {
MulResult = Builder.CreateNUWMul(LHS, RHS);
HigherBits = Builder.CreateLShr(MulResult, 64);
}
HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
return HigherBits;
Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
Builder.CreateStore(HigherBits, HighBitsAddress);
return Builder.CreateIntCast(MulResult, ResType, IsSigned);
}
case X86::BI__faststorefence: {
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::System);
}
case X86::BI__shiftleft128:
case X86::BI__shiftright128: {
// FIXME: Once fshl/fshr no longer add an unneeded 'and' and 'cmov', do this:
// llvm::Function *F = CGM.getIntrinsic(
// BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
// Int64Ty);
// Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
// return Builder.CreateCall(F, Ops);
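// For now, emulate via i128 arithmetic: concatenate the two 64-bit
// halves, shift, and truncate back to i64. The shift amount is masked
// to six bits, matching the behaviour of the underlying shld/shrd forms.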
llvm::Type *Int128Ty = Builder.getInt128Ty();
Value *Val = Builder.CreateOr(
Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64),
Builder.CreateZExt(Ops[0], Int128Ty));
Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
llvm::ConstantInt::get(Int128Ty, 0x3f));
Value *Res;
if (BuiltinID == X86::BI__shiftleft128)
Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
else
Res = Builder.CreateLShr(Val, Amt);
return Builder.CreateTrunc(Res, Int64Ty);
}
case X86::BI_ReadWriteBarrier:
case X86::BI_ReadBarrier:
case X86::BI_WriteBarrier: {
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::SingleThread);
}
case X86::BI_BitScanForward:
case X86::BI_BitScanForward64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
case X86::BI_BitScanReverse:
case X86::BI_BitScanReverse64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
case X86::BI_InterlockedAnd64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
case X86::BI_InterlockedExchange64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
case X86::BI_InterlockedExchangeAdd64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
case X86::BI_InterlockedExchangeSub64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
case X86::BI_InterlockedOr64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
case X86::BI_InterlockedXor64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
case X86::BI_InterlockedDecrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
case X86::BI_InterlockedIncrement64:
return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
case X86::BI_InterlockedCompareExchange128: {
// InterlockedCompareExchange128 doesn't directly refer to 128-bit ints;
// instead it takes pointers to 64-bit ints for Destination and
// ComparandResult, and the exchange value is passed as two 64-bit ints
// (high & low). The previous value is written to ComparandResult, and
// success is returned.
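// A rough sketch of the documented prototype, for orientation only:
//   unsigned char _InterlockedCompareExchange128(__int64 volatile *Dest,
//       __int64 ExchangeHigh, __int64 ExchangeLow,
//       __int64 *ComparandResult);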
llvm::Type *Int128Ty = Builder.getInt128Ty();
llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
Value *Destination =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PtrTy);
Value *ExchangeHigh128 =
Builder.CreateZExt(EmitScalarExpr(E->getArg(1)), Int128Ty);
Value *ExchangeLow128 =
Builder.CreateZExt(EmitScalarExpr(E->getArg(2)), Int128Ty);
Address ComparandResult(
Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int128PtrTy),
getContext().toCharUnitsFromBits(128));
Value *Exchange = Builder.CreateOr(
Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
ExchangeLow128);
Value *Comparand = Builder.CreateLoad(ComparandResult);
AtomicCmpXchgInst *CXI =
Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
AtomicOrdering::SequentiallyConsistent,
AtomicOrdering::SequentiallyConsistent);
CXI->setVolatile(true);
// Write the result back to the inout pointer.
Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
// Get the success boolean and zero extend it to i8.
Value *Success = Builder.CreateExtractValue(CXI, 1);
return Builder.CreateZExt(Success, ConvertType(E->getType()));
}
case X86::BI_AddressOfReturnAddress: {
Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
return Builder.CreateCall(F);
}
case X86::BI__stosb: {
// We treat __stosb as a volatile memset - it may not generate a
// "rep stosb" instruction, but it will create a memset that won't be
// optimized away.
return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true);
}
case X86::BI__ud2:
// llvm.trap makes a ud2a instruction on x86.
return EmitTrapCall(Intrinsic::trap);
case X86::BI__int2c: {
// This syscall signals a driver assertion failure in x86 NT kernels.
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
llvm::InlineAsm *IA =
llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*SideEffects=*/true);
llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoReturn);
CallSite CS = Builder.CreateCall(IA);
CS.setAttributes(NoReturnAttr);
return CS.getInstruction();
}
case X86::BI__readfsbyte:
case X86::BI__readfsword:
case X86::BI__readfsdword:
case X86::BI__readfsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
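// Address space 257 selects the x86 FS segment register (256 is GS).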
Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
llvm::PointerType::get(IntTy, 257));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
return Load;
}
case X86::BI__readgsbyte:
case X86::BI__readgsword:
case X86::BI__readgsdword:
case X86::BI__readgsqword: {
llvm::Type *IntTy = ConvertType(E->getType());
Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
llvm::PointerType::get(IntTy, 256));
LoadInst *Load = Builder.CreateAlignedLoad(
IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
Load->setVolatile(true);
return Load;
}
}
}
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<Value*, 4> Ops;
for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
default: return nullptr;
// __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
// call __builtin_readcyclecounter.
case PPC::BI__builtin_ppc_get_timebase:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
// vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
case PPC::BI__builtin_altivec_lvx:
case PPC::BI__builtin_altivec_lvxl:
case PPC::BI__builtin_altivec_lvebx:
case PPC::BI__builtin_altivec_lvehx:
case PPC::BI__builtin_altivec_lvewx:
case PPC::BI__builtin_altivec_lvsl:
case PPC::BI__builtin_altivec_lvsr:
case PPC::BI__builtin_vsx_lxvd2x:
case PPC::BI__builtin_vsx_lxvw4x:
case PPC::BI__builtin_vsx_lxvd2x_be:
case PPC::BI__builtin_vsx_lxvw4x_be:
case PPC::BI__builtin_vsx_lxvl:
case PPC::BI__builtin_vsx_lxvll:
{
if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
BuiltinID == PPC::BI__builtin_vsx_lxvll) {
Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
} else {
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
Ops.pop_back();
}
switch (BuiltinID) {
default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
case PPC::BI__builtin_altivec_lvx:
ID = Intrinsic::ppc_altivec_lvx;
break;
case PPC::BI__builtin_altivec_lvxl:
ID = Intrinsic::ppc_altivec_lvxl;
break;
case PPC::BI__builtin_altivec_lvebx:
ID = Intrinsic::ppc_altivec_lvebx;
break;
case PPC::BI__builtin_altivec_lvehx:
ID = Intrinsic::ppc_altivec_lvehx;
break;
case PPC::BI__builtin_altivec_lvewx:
ID = Intrinsic::ppc_altivec_lvewx;
break;
case PPC::BI__builtin_altivec_lvsl:
ID = Intrinsic::ppc_altivec_lvsl;
break;
case PPC::BI__builtin_altivec_lvsr:
ID = Intrinsic::ppc_altivec_lvsr;
break;
case PPC::BI__builtin_vsx_lxvd2x:
ID = Intrinsic::ppc_vsx_lxvd2x;
break;
case PPC::BI__builtin_vsx_lxvw4x:
ID = Intrinsic::ppc_vsx_lxvw4x;
break;
case PPC::BI__builtin_vsx_lxvd2x_be:
ID = Intrinsic::ppc_vsx_lxvd2x_be;
break;
case PPC::BI__builtin_vsx_lxvw4x_be:
ID = Intrinsic::ppc_vsx_lxvw4x_be;
break;
case PPC::BI__builtin_vsx_lxvl:
ID = Intrinsic::ppc_vsx_lxvl;
break;
case PPC::BI__builtin_vsx_lxvll:
ID = Intrinsic::ppc_vsx_lxvll;
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
// vec_st, vec_xst_be
case PPC::BI__builtin_altivec_stvx:
case PPC::BI__builtin_altivec_stvxl:
case PPC::BI__builtin_altivec_stvebx:
case PPC::BI__builtin_altivec_stvehx:
case PPC::BI__builtin_altivec_stvewx:
case PPC::BI__builtin_vsx_stxvd2x:
case PPC::BI__builtin_vsx_stxvw4x:
case PPC::BI__builtin_vsx_stxvd2x_be:
case PPC::BI__builtin_vsx_stxvw4x_be:
case PPC::BI__builtin_vsx_stxvl:
case PPC::BI__builtin_vsx_stxvll:
{
if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
BuiltinID == PPC::BI__builtin_vsx_stxvll) {
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
} else {
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
Ops.pop_back();
}
switch (BuiltinID) {
default: llvm_unreachable("Unsupported st intrinsic!");
case PPC::BI__builtin_altivec_stvx:
ID = Intrinsic::ppc_altivec_stvx;
break;
case PPC::BI__builtin_altivec_stvxl:
ID = Intrinsic::ppc_altivec_stvxl;
break;
case PPC::BI__builtin_altivec_stvebx:
ID = Intrinsic::ppc_altivec_stvebx;
break;
case PPC::BI__builtin_altivec_stvehx:
ID = Intrinsic::ppc_altivec_stvehx;
break;
case PPC::BI__builtin_altivec_stvewx:
ID = Intrinsic::ppc_altivec_stvewx;
break;
case PPC::BI__builtin_vsx_stxvd2x:
ID = Intrinsic::ppc_vsx_stxvd2x;
break;
case PPC::BI__builtin_vsx_stxvw4x:
ID = Intrinsic::ppc_vsx_stxvw4x;
break;
case PPC::BI__builtin_vsx_stxvd2x_be:
ID = Intrinsic::ppc_vsx_stxvd2x_be;
break;
case PPC::BI__builtin_vsx_stxvw4x_be:
ID = Intrinsic::ppc_vsx_stxvw4x_be;
break;
case PPC::BI__builtin_vsx_stxvl:
ID = Intrinsic::ppc_vsx_stxvl;
break;
case PPC::BI__builtin_vsx_stxvll:
ID = Intrinsic::ppc_vsx_stxvll;
break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
// Square root
case PPC::BI__builtin_vsx_xvsqrtsp:
case PPC::BI__builtin_vsx_xvsqrtdp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
ID = Intrinsic::sqrt;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, X);
}
// Count leading zeros
case PPC::BI__builtin_altivec_vclzb:
case PPC::BI__builtin_altivec_vclzh:
case PPC::BI__builtin_altivec_vclzw:
case PPC::BI__builtin_altivec_vclzd: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case PPC::BI__builtin_altivec_vctzb:
case PPC::BI__builtin_altivec_vctzh:
case PPC::BI__builtin_altivec_vctzw:
case PPC::BI__builtin_altivec_vctzd: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case PPC::BI__builtin_altivec_vpopcntb:
case PPC::BI__builtin_altivec_vpopcnth:
case PPC::BI__builtin_altivec_vpopcntw:
case PPC::BI__builtin_altivec_vpopcntd: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, X);
}
// Copy sign
case PPC::BI__builtin_vsx_xvcpsgnsp:
case PPC::BI__builtin_vsx_xvcpsgndp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
ID = Intrinsic::copysign;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, {X, Y});
}
// Rounding/truncation
case PPC::BI__builtin_vsx_xvrspip:
case PPC::BI__builtin_vsx_xvrdpip:
case PPC::BI__builtin_vsx_xvrdpim:
case PPC::BI__builtin_vsx_xvrspim:
case PPC::BI__builtin_vsx_xvrdpi:
case PPC::BI__builtin_vsx_xvrspi:
case PPC::BI__builtin_vsx_xvrdpic:
case PPC::BI__builtin_vsx_xvrspic:
case PPC::BI__builtin_vsx_xvrdpiz:
case PPC::BI__builtin_vsx_xvrspiz: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
BuiltinID == PPC::BI__builtin_vsx_xvrspim)
ID = Intrinsic::floor;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
BuiltinID == PPC::BI__builtin_vsx_xvrspi)
ID = Intrinsic::round;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
BuiltinID == PPC::BI__builtin_vsx_xvrspic)
ID = Intrinsic::nearbyint;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
BuiltinID == PPC::BI__builtin_vsx_xvrspip)
ID = Intrinsic::ceil;
else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
ID = Intrinsic::trunc;
llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, X);
}
// Absolute value
case PPC::BI__builtin_vsx_xvabsdp:
case PPC::BI__builtin_vsx_xvabssp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateCall(F, X);
}
// FMA variations
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
switch (BuiltinID) {
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
return Builder.CreateCall(F, {X, Y, Z});
case PPC::BI__builtin_vsx_xvnmaddadp:
case PPC::BI__builtin_vsx_xvnmaddasp:
return Builder.CreateFSub(Zero,
Builder.CreateCall(F, {X, Y, Z}), "sub");
case PPC::BI__builtin_vsx_xvmsubadp:
case PPC::BI__builtin_vsx_xvmsubasp:
return Builder.CreateCall(F,
{X, Y, Builder.CreateFSub(Zero, Z, "sub")});
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp:
Value *FsubRes =
Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
return Builder.CreateFSub(Zero, FsubRes, "sub");
}
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
}
case PPC::BI__builtin_vsx_insertword: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
// The third argument is a compile-time constant int. It must be clamped
// to the range [0, 12].
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI &&
"Third arg to xxinsertw intrinsic must be constant integer");
const int64_t MaxIndex = 12;
int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
// The builtin semantics don't exactly match the xxinsertw instruction's
// semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
// word from the first argument and inserts it into the second argument. The
// instruction extracts the word from its second input register and inserts
// it into its first input register, so swap the first and second arguments.
std::swap(Ops[0], Ops[1]);
// Need to cast the second argument from a vector of unsigned int to a
// vector of long long.
Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
if (getTarget().isLittleEndian()) {
// Create a shuffle mask of (1, 0)
Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
ConstantInt::get(Int32Ty, 0)
};
Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
// Reverse the double words in the vector we will extract from.
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask);
// Reverse the index.
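// (e.g. an Index of 4 becomes 12 - 4 == 8)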
Index = MaxIndex - Index;
}
// Intrinsic expects the first arg to be a vector of int.
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
return Builder.CreateCall(F, Ops);
}
case PPC::BI__builtin_vsx_extractuword: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
// Intrinsic expects the first argument to be a vector of doublewords.
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
// The second argument is a compile-time constant int that needs to
// be clamped to the range [0, 12].
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
assert(ArgCI &&
"Second Arg to xxextractuw intrinsic must be a constant integer!");
const int64_t MaxIndex = 12;
int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
if (getTarget().isLittleEndian()) {
// Reverse the index.
Index = MaxIndex - Index;
Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
// Emit the call, then reverse the double words of the results vector.
Value *Call = Builder.CreateCall(F, Ops);
// Create a shuffle mask of (1, 0)
Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
ConstantInt::get(Int32Ty, 0)
};
Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask);
return ShuffleCall;
} else {
Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
return Builder.CreateCall(F, Ops);
}
}
case PPC::BI__builtin_vsx_xxpermdi: {
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI && "Third arg must be constant integer!");
unsigned Index = ArgCI->getZExtValue();
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
// Account for endianness by treating this as just a shuffle, so we use
// the same indices for both LE and BE in order to produce the expected
// results in both cases.
unsigned ElemIdx0 = (Index & 2) >> 1;
unsigned ElemIdx1 = 2 + (Index & 1);
Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
ConstantInt::get(Int32Ty, ElemIdx1)};
Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
Value *ShuffleCall =
Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
case PPC::BI__builtin_vsx_xxsldwi: {
ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
assert(ArgCI && "Third argument must be a compile time constant");
unsigned Index = ArgCI->getZExtValue() & 0x3;
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4));
// Create a shuffle mask
unsigned ElemIdx0;
unsigned ElemIdx1;
unsigned ElemIdx2;
unsigned ElemIdx3;
if (getTarget().isLittleEndian()) {
// Little endian element N comes from element 8+N-Index of the
// concatenated wide vector (of course, using modulo arithmetic on
// the total number of elements).
ElemIdx0 = (8 - Index) % 8;
ElemIdx1 = (9 - Index) % 8;
ElemIdx2 = (10 - Index) % 8;
ElemIdx3 = (11 - Index) % 8;
} else {
// Big endian ElemIdx<N> = Index + N
ElemIdx0 = Index;
ElemIdx1 = Index + 1;
ElemIdx2 = Index + 2;
ElemIdx3 = Index + 3;
}
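// For example, Index == 1 yields the shuffle mask (7, 0, 1, 2) on
// little-endian targets and (1, 2, 3, 4) on big-endian targets.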
Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0),
ConstantInt::get(Int32Ty, ElemIdx1),
ConstantInt::get(Int32Ty, ElemIdx2),
ConstantInt::get(Int32Ty, ElemIdx3)};
Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
Value *ShuffleCall =
Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
}
}
Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_div_scale:
case AMDGPU::BI__builtin_amdgcn_div_scalef: {
// Translate from the intrinsic's struct return to the builtin's out
// argument.
Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
llvm::Value *X = EmitScalarExpr(E->getArg(0));
llvm::Value *Y = EmitScalarExpr(E->getArg(1));
llvm::Value *Z = EmitScalarExpr(E->getArg(2));
llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
X->getType());
llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
llvm::Type *RealFlagType
= FlagOutPtr.getPointer()->getType()->getPointerElementType();
llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
Builder.CreateStore(FlagExt, FlagOutPtr);
return Result;
}
case AMDGPU::BI__builtin_amdgcn_div_fmas:
case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
llvm::Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
Src0->getType());
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
}
case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
case AMDGPU::BI__builtin_amdgcn_mov_dpp: {
llvm::SmallVector<llvm::Value *, 5> Args;
for (unsigned I = 0; I != 5; ++I)
Args.push_back(EmitScalarExpr(E->getArg(I)));
Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_mov_dpp,
Args[0]->getType());
return Builder.CreateCall(F, Args);
}
case AMDGPU::BI__builtin_amdgcn_div_fixup:
case AMDGPU::BI__builtin_amdgcn_div_fixupf:
case AMDGPU::BI__builtin_amdgcn_div_fixuph:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
case AMDGPU::BI__builtin_amdgcn_trig_preop:
case AMDGPU::BI__builtin_amdgcn_trig_preopf:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
case AMDGPU::BI__builtin_amdgcn_rcp:
case AMDGPU::BI__builtin_amdgcn_rcpf:
case AMDGPU::BI__builtin_amdgcn_rcph:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
case AMDGPU::BI__builtin_amdgcn_rsqh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
case AMDGPU::BI__builtin_amdgcn_sinf:
case AMDGPU::BI__builtin_amdgcn_sinh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
case AMDGPU::BI__builtin_amdgcn_cosf:
case AMDGPU::BI__builtin_amdgcn_cosh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
case AMDGPU::BI__builtin_amdgcn_ldexpf:
case AMDGPU::BI__builtin_amdgcn_ldexph:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
case AMDGPU::BI__builtin_amdgcn_frexp_mant:
case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
case AMDGPU::BI__builtin_amdgcn_frexp_manth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
case AMDGPU::BI__builtin_amdgcn_frexp_exp:
case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
{ Builder.getInt32Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
Value *Src0 = EmitScalarExpr(E->getArg(0));
Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
{ Builder.getInt16Ty(), Src0->getType() });
return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_fract:
case AMDGPU::BI__builtin_amdgcn_fractf:
case AMDGPU::BI__builtin_amdgcn_fracth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
case AMDGPU::BI__builtin_amdgcn_lerp:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
case AMDGPU::BI__builtin_amdgcn_uicmp:
case AMDGPU::BI__builtin_amdgcn_uicmpl:
case AMDGPU::BI__builtin_amdgcn_sicmp:
case AMDGPU::BI__builtin_amdgcn_sicmpl:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_icmp);
case AMDGPU::BI__builtin_amdgcn_fcmp:
case AMDGPU::BI__builtin_amdgcn_fcmpf:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fcmp);
case AMDGPU::BI__builtin_amdgcn_class:
case AMDGPU::BI__builtin_amdgcn_classf:
case AMDGPU::BI__builtin_amdgcn_classh:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
case AMDGPU::BI__builtin_amdgcn_fmed3f:
case AMDGPU::BI__builtin_amdgcn_fmed3h:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
case AMDGPU::BI__builtin_amdgcn_read_exec: {
CallInst *CI = cast<CallInst>(
EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
CI->setConvergent();
return CI;
}
case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
"exec_lo" : "exec_hi";
CallInst *CI = cast<CallInst>(
EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName));
CI->setConvergent();
return CI;
}
case AMDGPU::BI__builtin_amdgcn_ds_faddf:
case AMDGPU::BI__builtin_amdgcn_ds_fminf:
case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
llvm::SmallVector<llvm::Value *, 5> Args;
for (unsigned I = 0; I != 5; ++I)
Args.push_back(EmitScalarExpr(E->getArg(I)));
const llvm::Type *PtrTy = Args[0]->getType();
// Check the pointer parameter.
if (!PtrTy->isPointerTy() ||
E->getArg(0)
->getType()
->getPointeeType()
.getQualifiers()
.getAddressSpace() != LangAS::opencl_local ||
!PtrTy->getPointerElementType()->isFloatTy()) {
CGM.Error(E->getArg(0)->getLocStart(),
"parameter should have type \"local float*\"");
return nullptr;
}
// Check the float parameter.
if (!Args[1]->getType()->isFloatTy()) {
CGM.Error(E->getArg(1)->getLocStart(),
"parameter should have type \"float\"");
return nullptr;
}
Intrinsic::ID ID;
switch (BuiltinID) {
case AMDGPU::BI__builtin_amdgcn_ds_faddf:
ID = Intrinsic::amdgcn_ds_fadd;
break;
case AMDGPU::BI__builtin_amdgcn_ds_fminf:
ID = Intrinsic::amdgcn_ds_fmin;
break;
case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
ID = Intrinsic::amdgcn_ds_fmax;
break;
default:
llvm_unreachable("Unknown BuiltinID");
}
Value *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Args);
}
// amdgcn workitem
case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
// r600 intrinsics
case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
case AMDGPU::BI__builtin_r600_read_tidig_x:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
case AMDGPU::BI__builtin_r600_read_tidig_y:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
case AMDGPU::BI__builtin_r600_read_tidig_z:
return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
default:
return nullptr;
}
}
/// Handle a SystemZ function in which the final argument is a pointer
/// to an int that receives the post-instruction CC value. At the LLVM level
/// this is represented as a function that returns a {result, cc} pair.
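/// For example, a call such as __builtin_s390_vceqbs(a, b, &cc) is
/// lowered to an intrinsic returning a {result, cc} pair, and the cc
/// element is stored through the trailing pointer argument.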
static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
unsigned IntrinsicID,
const CallExpr *E) {
unsigned NumArgs = E->getNumArgs() - 1;
SmallVector<Value *, 8> Args(NumArgs);
for (unsigned I = 0; I < NumArgs; ++I)
Args[I] = CGF.EmitScalarExpr(E->getArg(I));
Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
Value *F = CGF.CGM.getIntrinsic(IntrinsicID);
Value *Call = CGF.Builder.CreateCall(F, Args);
Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
CGF.Builder.CreateStore(CC, CCPtr);
return CGF.Builder.CreateExtractValue(Call, 0);
}
Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case SystemZ::BI__builtin_tbegin: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbegin_nofloat: {
Value *TDB = EmitScalarExpr(E->getArg(0));
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
Value *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tbeginc: {
Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
Value *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
return Builder.CreateCall(F, {TDB, Control});
}
case SystemZ::BI__builtin_tabort: {
Value *Data = EmitScalarExpr(E->getArg(0));
Value *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
}
case SystemZ::BI__builtin_non_tx_store: {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *Data = EmitScalarExpr(E->getArg(1));
Value *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
return Builder.CreateCall(F, {Data, Address});
}
// Vector builtins. Note that most vector builtins are mapped automatically
// to target-specific LLVM intrinsics. The ones handled specially here can
// be represented via standard LLVM IR, which is preferable to enable common
// LLVM optimizations.
case SystemZ::BI__builtin_s390_vpopctb:
case SystemZ::BI__builtin_s390_vpopcth:
case SystemZ::BI__builtin_s390_vpopctf:
case SystemZ::BI__builtin_s390_vpopctg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vclzb:
case SystemZ::BI__builtin_s390_vclzh:
case SystemZ::BI__builtin_s390_vclzf:
case SystemZ::BI__builtin_s390_vclzg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case SystemZ::BI__builtin_s390_vctzb:
case SystemZ::BI__builtin_s390_vctzh:
case SystemZ::BI__builtin_s390_vctzf:
case SystemZ::BI__builtin_s390_vctzg: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
case SystemZ::BI__builtin_s390_vfsqsb:
case SystemZ::BI__builtin_s390_vfsqdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vfmasb:
case SystemZ::BI__builtin_s390_vfmadb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Z});
}
case SystemZ::BI__builtin_s390_vfmssb:
case SystemZ::BI__builtin_s390_vfmsdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
}
case SystemZ::BI__builtin_s390_vfnmasb:
case SystemZ::BI__builtin_s390_vfnmadb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, Z}), "sub");
}
case SystemZ::BI__builtin_s390_vfnmssb:
case SystemZ::BI__builtin_s390_vfnmsdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
Value *Z = EmitScalarExpr(E->getArg(2));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
Value *NegZ = Builder.CreateFSub(Zero, Z, "sub");
return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, NegZ}));
}
case SystemZ::BI__builtin_s390_vflpsb:
case SystemZ::BI__builtin_s390_vflpdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateCall(F, X);
}
case SystemZ::BI__builtin_s390_vflnsb:
case SystemZ::BI__builtin_s390_vflndb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
}
case SystemZ::BI__builtin_s390_vfisb:
case SystemZ::BI__builtin_s390_vfidb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
// Constant-fold the M4 and M5 mask arguments.
llvm::APSInt M4, M5;
bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
(void)IsConstM4; (void)IsConstM5;
// Check whether this instance can be represented via an LLVM standard
// intrinsic. We only support some combinations of M4 and M5.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
default: break;
case 0: // IEEE-inexact exception allowed
switch (M5.getZExtValue()) {
default: break;
case 0: ID = Intrinsic::rint; break;
}
break;
case 4: // IEEE-inexact exception suppressed
switch (M5.getZExtValue()) {
default: break;
case 0: ID = Intrinsic::nearbyint; break;
case 1: ID = Intrinsic::round; break;
case 5: ID = Intrinsic::trunc; break;
case 6: ID = Intrinsic::ceil; break;
case 7: ID = Intrinsic::floor; break;
}
break;
}
if (ID != Intrinsic::not_intrinsic) {
Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, X);
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
return Builder.CreateCall(F, {X, M4Value, M5Value});
}
case SystemZ::BI__builtin_s390_vfmaxsb:
case SystemZ::BI__builtin_s390_vfmaxdb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
llvm::APSInt M4;
bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
assert(IsConstM4 && "Constant arg isn't actually constant?");
(void)IsConstM4;
// Check whether this instance can be represented via an LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
default: break;
case 4: ID = Intrinsic::maxnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, {X, Y});
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
return Builder.CreateCall(F, {X, Y, M4Value});
}
case SystemZ::BI__builtin_s390_vfminsb:
case SystemZ::BI__builtin_s390_vfmindb: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *X = EmitScalarExpr(E->getArg(0));
Value *Y = EmitScalarExpr(E->getArg(1));
// Constant-fold the M4 mask argument.
llvm::APSInt M4;
bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
assert(IsConstM4 && "Constant arg isn't actually constant?");
(void)IsConstM4;
// Check whether this instance can be represented via an LLVM standard
// intrinsic. We only support some values of M4.
Intrinsic::ID ID = Intrinsic::not_intrinsic;
switch (M4.getZExtValue()) {
default: break;
case 4: ID = Intrinsic::minnum; break;
}
if (ID != Intrinsic::not_intrinsic) {
Function *F = CGM.getIntrinsic(ID, ResultType);
return Builder.CreateCall(F, {X, Y});
}
switch (BuiltinID) {
case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
default: llvm_unreachable("Unknown BuiltinID");
}
Function *F = CGM.getIntrinsic(ID);
Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
return Builder.CreateCall(F, {X, Y, M4Value});
}
// Vector intrinsics that output the post-instruction CC value.
#define INTRINSIC_WITH_CC(NAME) \
case SystemZ::BI__builtin_##NAME: \
return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
INTRINSIC_WITH_CC(s390_vpkshs);
INTRINSIC_WITH_CC(s390_vpksfs);
INTRINSIC_WITH_CC(s390_vpksgs);
INTRINSIC_WITH_CC(s390_vpklshs);
INTRINSIC_WITH_CC(s390_vpklsfs);
INTRINSIC_WITH_CC(s390_vpklsgs);
INTRINSIC_WITH_CC(s390_vceqbs);
INTRINSIC_WITH_CC(s390_vceqhs);
INTRINSIC_WITH_CC(s390_vceqfs);
INTRINSIC_WITH_CC(s390_vceqgs);
INTRINSIC_WITH_CC(s390_vchbs);
INTRINSIC_WITH_CC(s390_vchhs);
INTRINSIC_WITH_CC(s390_vchfs);
INTRINSIC_WITH_CC(s390_vchgs);
INTRINSIC_WITH_CC(s390_vchlbs);
INTRINSIC_WITH_CC(s390_vchlhs);
INTRINSIC_WITH_CC(s390_vchlfs);
INTRINSIC_WITH_CC(s390_vchlgs);
INTRINSIC_WITH_CC(s390_vfaebs);
INTRINSIC_WITH_CC(s390_vfaehs);
INTRINSIC_WITH_CC(s390_vfaefs);
INTRINSIC_WITH_CC(s390_vfaezbs);
INTRINSIC_WITH_CC(s390_vfaezhs);
INTRINSIC_WITH_CC(s390_vfaezfs);
INTRINSIC_WITH_CC(s390_vfeebs);
INTRINSIC_WITH_CC(s390_vfeehs);
INTRINSIC_WITH_CC(s390_vfeefs);
INTRINSIC_WITH_CC(s390_vfeezbs);
INTRINSIC_WITH_CC(s390_vfeezhs);
INTRINSIC_WITH_CC(s390_vfeezfs);
INTRINSIC_WITH_CC(s390_vfenebs);
INTRINSIC_WITH_CC(s390_vfenehs);
INTRINSIC_WITH_CC(s390_vfenefs);
INTRINSIC_WITH_CC(s390_vfenezbs);
INTRINSIC_WITH_CC(s390_vfenezhs);
INTRINSIC_WITH_CC(s390_vfenezfs);
INTRINSIC_WITH_CC(s390_vistrbs);
INTRINSIC_WITH_CC(s390_vistrhs);
INTRINSIC_WITH_CC(s390_vistrfs);
INTRINSIC_WITH_CC(s390_vstrcbs);
INTRINSIC_WITH_CC(s390_vstrchs);
INTRINSIC_WITH_CC(s390_vstrcfs);
INTRINSIC_WITH_CC(s390_vstrczbs);
INTRINSIC_WITH_CC(s390_vstrczhs);
INTRINSIC_WITH_CC(s390_vstrczfs);
INTRINSIC_WITH_CC(s390_vfcesbs);
INTRINSIC_WITH_CC(s390_vfcedbs);
INTRINSIC_WITH_CC(s390_vfchsbs);
INTRINSIC_WITH_CC(s390_vfchdbs);
INTRINSIC_WITH_CC(s390_vfchesbs);
INTRINSIC_WITH_CC(s390_vfchedbs);
INTRINSIC_WITH_CC(s390_vftcisb);
INTRINSIC_WITH_CC(s390_vftcidb);
#undef INTRINSIC_WITH_CC
default:
return nullptr;
}
}
Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
auto MakeLdg = [&](unsigned IntrinsicID) {
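// ldg loads go through the read-only (non-coherent) global cache path;
// the alignment operand is derived from the natural pointee alignment.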
Value *Ptr = EmitScalarExpr(E->getArg(0));
clang::CharUnits Align =
getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
return Builder.CreateCall(
CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
Ptr->getType()}),
{Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
};
auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1))});
};
switch (BuiltinID) {
case NVPTX::BI__nvvm_atom_add_gen_i:
case NVPTX::BI__nvvm_atom_add_gen_l:
case NVPTX::BI__nvvm_atom_add_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
case NVPTX::BI__nvvm_atom_sub_gen_i:
case NVPTX::BI__nvvm_atom_sub_gen_l:
case NVPTX::BI__nvvm_atom_sub_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
case NVPTX::BI__nvvm_atom_and_gen_i:
case NVPTX::BI__nvvm_atom_and_gen_l:
case NVPTX::BI__nvvm_atom_and_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
case NVPTX::BI__nvvm_atom_or_gen_i:
case NVPTX::BI__nvvm_atom_or_gen_l:
case NVPTX::BI__nvvm_atom_or_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
case NVPTX::BI__nvvm_atom_xor_gen_i:
case NVPTX::BI__nvvm_atom_xor_gen_l:
case NVPTX::BI__nvvm_atom_xor_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
case NVPTX::BI__nvvm_atom_xchg_gen_i:
case NVPTX::BI__nvvm_atom_xchg_gen_l:
case NVPTX::BI__nvvm_atom_xchg_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
case NVPTX::BI__nvvm_atom_max_gen_i:
case NVPTX::BI__nvvm_atom_max_gen_l:
case NVPTX::BI__nvvm_atom_max_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
case NVPTX::BI__nvvm_atom_max_gen_ui:
case NVPTX::BI__nvvm_atom_max_gen_ul:
case NVPTX::BI__nvvm_atom_max_gen_ull:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
case NVPTX::BI__nvvm_atom_min_gen_i:
case NVPTX::BI__nvvm_atom_min_gen_l:
case NVPTX::BI__nvvm_atom_min_gen_ll:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
case NVPTX::BI__nvvm_atom_min_gen_ui:
case NVPTX::BI__nvvm_atom_min_gen_ul:
case NVPTX::BI__nvvm_atom_min_gen_ull:
return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
case NVPTX::BI__nvvm_atom_cas_gen_i:
case NVPTX::BI__nvvm_atom_cas_gen_l:
case NVPTX::BI__nvvm_atom_cas_gen_ll:
// __nvvm_atom_cas_gen_* should return the old value rather than the
// success flag.
return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
case NVPTX::BI__nvvm_atom_add_gen_f: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
// atomicrmw only deals with integer arguments, so we need to use
// LLVM's nvvm_atomic_load_add_f32 intrinsic instead.
Value *FnALAF32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType());
return Builder.CreateCall(FnALAF32, {Ptr, Val});
}
case NVPTX::BI__nvvm_atom_add_gen_d: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
// atomicrmw only deals with integer arguments, so we need to use
// LLVM's nvvm_atomic_load_add_f64 intrinsic.
Value *FnALAF64 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f64, Ptr->getType());
return Builder.CreateCall(FnALAF64, {Ptr, Val});
}
case NVPTX::BI__nvvm_atom_inc_gen_ui: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
Value *FnALI32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
return Builder.CreateCall(FnALI32, {Ptr, Val});
}
case NVPTX::BI__nvvm_atom_dec_gen_ui: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
Value *FnALD32 =
CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
return Builder.CreateCall(FnALD32, {Ptr, Val});
}
case NVPTX::BI__nvvm_ldg_c:
case NVPTX::BI__nvvm_ldg_c2:
case NVPTX::BI__nvvm_ldg_c4:
case NVPTX::BI__nvvm_ldg_s:
case NVPTX::BI__nvvm_ldg_s2:
case NVPTX::BI__nvvm_ldg_s4:
case NVPTX::BI__nvvm_ldg_i:
case NVPTX::BI__nvvm_ldg_i2:
case NVPTX::BI__nvvm_ldg_i4:
case NVPTX::BI__nvvm_ldg_l:
case NVPTX::BI__nvvm_ldg_ll:
case NVPTX::BI__nvvm_ldg_ll2:
case NVPTX::BI__nvvm_ldg_uc:
case NVPTX::BI__nvvm_ldg_uc2:
case NVPTX::BI__nvvm_ldg_uc4:
case NVPTX::BI__nvvm_ldg_us:
case NVPTX::BI__nvvm_ldg_us2:
case NVPTX::BI__nvvm_ldg_us4:
case NVPTX::BI__nvvm_ldg_ui:
case NVPTX::BI__nvvm_ldg_ui2:
case NVPTX::BI__nvvm_ldg_ui4:
case NVPTX::BI__nvvm_ldg_ul:
case NVPTX::BI__nvvm_ldg_ull:
case NVPTX::BI__nvvm_ldg_ull2:
// PTX Interoperability section 2.2: "For a vector with an even number of
// elements, its alignment is set to number of elements times the alignment
// of its member: n*alignof(t)."
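// E.g. an int2 loaded via __nvvm_ldg_i2 gets alignment 2 * alignof(int) = 8.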
return MakeLdg(Intrinsic::nvvm_ldg_global_i);
case NVPTX::BI__nvvm_ldg_f:
case NVPTX::BI__nvvm_ldg_f2:
case NVPTX::BI__nvvm_ldg_f4:
case NVPTX::BI__nvvm_ldg_d:
case NVPTX::BI__nvvm_ldg_d2:
return MakeLdg(Intrinsic::nvvm_ldg_global_f);
case NVPTX::BI__nvvm_atom_cta_add_gen_i:
case NVPTX::BI__nvvm_atom_cta_add_gen_l:
case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_add_gen_i:
case NVPTX::BI__nvvm_atom_sys_add_gen_l:
case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_add_gen_f:
case NVPTX::BI__nvvm_atom_cta_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
case NVPTX::BI__nvvm_atom_sys_add_gen_f:
case NVPTX::BI__nvvm_atom_sys_add_gen_d:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_max_gen_i:
case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
case NVPTX::BI__nvvm_atom_cta_max_gen_l:
case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_max_gen_i:
case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
case NVPTX::BI__nvvm_atom_sys_max_gen_l:
case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_min_gen_i:
case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
case NVPTX::BI__nvvm_atom_cta_min_gen_l:
case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_min_gen_i:
case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
case NVPTX::BI__nvvm_atom_sys_min_gen_l:
case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_and_gen_i:
case NVPTX::BI__nvvm_atom_cta_and_gen_l:
case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_and_gen_i:
case NVPTX::BI__nvvm_atom_sys_and_gen_l:
case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_or_gen_i:
case NVPTX::BI__nvvm_atom_cta_or_gen_l:
case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_or_gen_i:
case NVPTX::BI__nvvm_atom_sys_or_gen_l:
case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(
Intrinsic::nvvm_atomic_cas_gen_i_cta,
{Ptr->getType()->getPointerElementType(), Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(
Intrinsic::nvvm_atomic_cas_gen_i_sys,
{Ptr->getType()->getPointerElementType(), Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_match_all_sync_i32p:
case NVPTX::BI__nvvm_match_all_sync_i64p: {
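// The intrinsic returns a { value, i1 predicate } pair; the predicate is
// zero-extended and stored through the third (pointer) argument, and the
// value becomes the result of the builtin.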
Value *Mask = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
Value *ResultPair = Builder.CreateCall(
CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
? Intrinsic::nvvm_match_all_sync_i32p
: Intrinsic::nvvm_match_all_sync_i64p),
{Mask, Val});
Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
PredOutPtr.getElementType());
Builder.CreateStore(Pred, PredOutPtr);
return Builder.CreateExtractValue(ResultPair, 0);
}
case NVPTX::BI__hmma_m16n16k16_ld_a:
case NVPTX::BI__hmma_m16n16k16_ld_b:
case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
case NVPTX::BI__hmma_m32n8k16_ld_a:
case NVPTX::BI__hmma_m32n8k16_ld_b:
case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
case NVPTX::BI__hmma_m8n32k16_ld_a:
case NVPTX::BI__hmma_m8n32k16_ld_b:
case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
case NVPTX::BI__hmma_m8n32k16_ld_c_f32: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
llvm::APSInt isColMajorArg;
if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
return nullptr;
bool isColMajor = isColMajorArg.getSExtValue();
unsigned IID;
unsigned NumResults;
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_ld_a:
IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride
: Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m16n16k16_ld_b:
IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_col_stride
: Intrinsic::nvvm_wmma_m16n16k16_load_b_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_col_stride
: Intrinsic::nvvm_wmma_m16n16k16_load_c_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_col_stride
: Intrinsic::nvvm_wmma_m16n16k16_load_c_f32_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m32n8k16_ld_a:
IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_col_stride
: Intrinsic::nvvm_wmma_m32n8k16_load_a_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m32n8k16_ld_b:
IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_col_stride
: Intrinsic::nvvm_wmma_m32n8k16_load_b_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_col_stride
: Intrinsic::nvvm_wmma_m32n8k16_load_c_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_col_stride
: Intrinsic::nvvm_wmma_m32n8k16_load_c_f32_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m8n32k16_ld_a:
IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_col_stride
: Intrinsic::nvvm_wmma_m8n32k16_load_a_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m8n32k16_ld_b:
IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_col_stride
: Intrinsic::nvvm_wmma_m8n32k16_load_b_f16_row_stride;
NumResults = 8;
break;
case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_col_stride
: Intrinsic::nvvm_wmma_m8n32k16_load_c_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_col_stride
: Intrinsic::nvvm_wmma_m8n32k16_load_c_f32_row_stride;
NumResults = 8;
break;
default:
llvm_unreachable("Unexpected builtin ID.");
}
Value *Result =
Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
// Save returned values.
for (unsigned i = 0; i < NumResults; ++i) {
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
Dst.getElementType()),
Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
}
return Result;
}
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
case NVPTX::BI__hmma_m16n16k16_st_c_f32:
case NVPTX::BI__hmma_m32n8k16_st_c_f16:
case NVPTX::BI__hmma_m32n8k16_st_c_f32:
case NVPTX::BI__hmma_m8n32k16_st_c_f16:
case NVPTX::BI__hmma_m8n32k16_st_c_f32: {
Value *Dst = EmitScalarExpr(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
llvm::APSInt isColMajorArg;
if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
return nullptr;
bool isColMajor = isColMajorArg.getSExtValue();
unsigned IID;
unsigned NumResults = 8;
// PTX instructions (and LLVM intrinsics) are defined for slice _d_, yet
// for some reason nvcc builtins use _c_.
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_st_c_f16:
IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_col_stride
: Intrinsic::nvvm_wmma_m16n16k16_store_d_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m16n16k16_st_c_f32:
IID = isColMajor ? Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_col_stride
: Intrinsic::nvvm_wmma_m16n16k16_store_d_f32_row_stride;
break;
case NVPTX::BI__hmma_m32n8k16_st_c_f16:
IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_col_stride
: Intrinsic::nvvm_wmma_m32n8k16_store_d_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m32n8k16_st_c_f32:
IID = isColMajor ? Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_col_stride
: Intrinsic::nvvm_wmma_m32n8k16_store_d_f32_row_stride;
break;
case NVPTX::BI__hmma_m8n32k16_st_c_f16:
IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_col_stride
: Intrinsic::nvvm_wmma_m8n32k16_store_d_f16_row_stride;
NumResults = 4;
break;
case NVPTX::BI__hmma_m8n32k16_st_c_f32:
IID = isColMajor ? Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_col_stride
: Intrinsic::nvvm_wmma_m8n32k16_store_d_f32_row_stride;
break;
default:
llvm_unreachable("Unexpected builtin ID.");
}
Function *Intrinsic = CGM.getIntrinsic(IID, Dst->getType());
llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
SmallVector<Value *, 10> Values = {Dst};
for (unsigned i = 0; i < NumResults; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, ParamType));
}
Values.push_back(Ldm);
Value *Result = Builder.CreateCall(Intrinsic, Values);
return Result;
}
// BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
// Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
case NVPTX::BI__hmma_m8n32k16_mma_f16f32: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Address SrcA = EmitPointerWithAlignment(E->getArg(1));
Address SrcB = EmitPointerWithAlignment(E->getArg(2));
Address SrcC = EmitPointerWithAlignment(E->getArg(3));
llvm::APSInt LayoutArg;
if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext()))
return nullptr;
int Layout = LayoutArg.getSExtValue();
if (Layout < 0 || Layout > 3)
return nullptr;
llvm::APSInt SatfArg;
if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
return nullptr;
bool Satf = SatfArg.getSExtValue();
// clang-format off
#define MMA_VARIANTS(geom, type) {{ \
Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
}}
// clang-format on
auto getMMAIntrinsic = [Layout, Satf](std::array<unsigned, 8> Variants) {
unsigned Index = Layout * 2 + Satf;
assert(Index < 8);
return Variants[Index];
};
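// E.g. Layout == 2 with Satf == 1 yields index 5, selecting the col_row
// satfinite variant from MMA_VARIANTS above.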
unsigned IID;
unsigned NumEltsC;
unsigned NumEltsD;
switch (BuiltinID) {
case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f16));
NumEltsC = 4;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f16));
NumEltsC = 4;
NumEltsD = 8;
break;
case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f16_f32));
NumEltsC = 8;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
IID = getMMAIntrinsic(MMA_VARIANTS(m16n16k16, f32_f32));
NumEltsC = 8;
NumEltsD = 8;
break;
case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f16));
NumEltsC = 4;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f16));
NumEltsC = 4;
NumEltsD = 8;
break;
case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f16_f32));
NumEltsC = 8;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
IID = getMMAIntrinsic(MMA_VARIANTS(m32n8k16, f32_f32));
NumEltsC = 8;
NumEltsD = 8;
break;
case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f16));
NumEltsC = 4;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f16));
NumEltsC = 4;
NumEltsD = 8;
break;
case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f16_f32));
NumEltsC = 8;
NumEltsD = 4;
break;
case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
IID = getMMAIntrinsic(MMA_VARIANTS(m8n32k16, f32_f32));
NumEltsC = 8;
NumEltsD = 8;
break;
default:
llvm_unreachable("Unexpected builtin ID.");
}
#undef MMA_VARIANTS
SmallVector<Value *, 24> Values;
Function *Intrinsic = CGM.getIntrinsic(IID);
llvm::Type *ABType = Intrinsic->getFunctionType()->getParamType(0);
// Load A
for (unsigned i = 0; i < 8; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(SrcA.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, ABType));
}
// Load B
for (unsigned i = 0; i < 8; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(SrcB.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, ABType));
}
// Load C
llvm::Type *CType = Intrinsic->getFunctionType()->getParamType(16);
for (unsigned i = 0; i < NumEltsC; ++i) {
Value *V = Builder.CreateAlignedLoad(
Builder.CreateGEP(SrcC.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, CType));
}
Value *Result = Builder.CreateCall(Intrinsic, Values);
llvm::Type *DType = Dst.getElementType();
for (unsigned i = 0; i < NumEltsD; ++i)
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
return Result;
}
default:
return nullptr;
}
}
Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_memory_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *I = EmitScalarExpr(E->getArg(0));
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
return Builder.CreateCall(Callee, I);
}
case WebAssembly::BI__builtin_wasm_memory_grow: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *Args[] = {
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1))
};
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
case WebAssembly::BI__builtin_wasm_mem_size: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *I = EmitScalarExpr(E->getArg(0));
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_size, ResultType);
return Builder.CreateCall(Callee, I);
}
case WebAssembly::BI__builtin_wasm_mem_grow: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *Args[] = {
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(1))
};
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_mem_grow, ResultType);
return Builder.CreateCall(Callee, Args);
}
case WebAssembly::BI__builtin_wasm_current_memory: {
llvm::Type *ResultType = ConvertType(E->getType());
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_current_memory, ResultType);
return Builder.CreateCall(Callee);
}
case WebAssembly::BI__builtin_wasm_grow_memory: {
Value *X = EmitScalarExpr(E->getArg(0));
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_grow_memory, X->getType());
return Builder.CreateCall(Callee, X);
}
case WebAssembly::BI__builtin_wasm_throw: {
Value *Tag = EmitScalarExpr(E->getArg(0));
Value *Obj = EmitScalarExpr(E->getArg(1));
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
return Builder.CreateCall(Callee, {Tag, Obj});
}
case WebAssembly::BI__builtin_wasm_rethrow: {
Value *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
return Builder.CreateCall(Callee);
}
default:
return nullptr;
}
}
Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
SmallVector<llvm::Value *, 4> Ops;
Intrinsic::ID ID = Intrinsic::not_intrinsic;
auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
// The base pointer is passed by address, so it needs to be loaded.
Address BP = EmitPointerWithAlignment(E->getArg(0));
BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
BP.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// Operands are Base, Increment, Modifier, Start.
if (HasImm)
Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(3)) };
else
Ops = { Base, EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2)) };
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
NewBase->getType()->getPointerTo());
Address Dest = EmitPointerWithAlignment(E->getArg(0));
// The intrinsic generates two results. The new value for the base pointer
// needs to be stored.
Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
return Builder.CreateExtractValue(Result, 0);
};
auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
// The base pointer is passed by address, so it needs to be loaded.
Address BP = EmitPointerWithAlignment(E->getArg(0));
BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
BP.getAlignment());
llvm::Value *Base = Builder.CreateLoad(BP);
// Operands are Base, Increment, Modifier, Value, Start.
if (HasImm)
Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
else
Ops = { Base, EmitScalarExpr(E->getArg(1)),
EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };
llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
llvm::Value *LV = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)),
NewBase->getType()->getPointerTo());
Address Dest = EmitPointerWithAlignment(E->getArg(0));
// The intrinsic generates one result, which is the new value for the base
// pointer. It needs to be stored.
return Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
};
// Handle the conversion of bit-reverse load intrinsics to bitcode.
// The intrinsic call emitted here only reads from memory; the write to
// memory is handled by the store instruction.
auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
// The builtin produces one result, the new value for the base pointer,
// which needs to be returned. The loaded value is communicated back to the
// caller by address, so it needs to be stored through the destination
// pointer.
llvm::Value *BaseAddress =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
// Expressions like &(*pt++) would be incremented once per evaluation;
// EmitPointerWithAlignment and EmitScalarExpr each evaluate the expression
// once per call.
Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
DestAddr.getAlignment());
llvm::Value *DestAddress = DestAddr.getPointer();
// Operands are Base, Dest, Modifier.
// The intrinsic format in LLVM IR is defined as
// { ValueType, i8* } (i8*, i32).
Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
// The value needs to be stored as the variable is passed by reference.
llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);
// The stored value needs to be truncated to fit the destination type.
// While i32 and i64 are natively supported on Hexagon, i8 and i16 need
// to be handled with stores of the respective destination type.
DestVal = Builder.CreateTrunc(DestVal, DestTy);
llvm::Value *DestForStore =
Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
// The updated value of the base pointer is returned.
return Builder.CreateExtractValue(Result, 1);
};
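// E.g. __builtin_brev_ldw lowers roughly to:
//   %r = call { i32, i8* } @llvm.hexagon.L2.loadri.pbr(i8* %base, i32 %mod)
// element 0 is truncated (if needed) and stored through the destination
// pointer, and element 1, the updated base, is returned.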
switch (BuiltinID) {
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B: {
Address Dest = EmitPointerWithAlignment(E->getArg(2));
unsigned Size;
if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vaddcarry) {
Size = 512;
ID = Intrinsic::hexagon_V6_vaddcarry;
} else {
Size = 1024;
ID = Intrinsic::hexagon_V6_vaddcarry_128B;
}
Dest = Builder.CreateBitCast(Dest,
llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
LoadInst *QLd = Builder.CreateLoad(Dest);
Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
Vprd->getType()->getPointerTo(0));
Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
Address Dest = EmitPointerWithAlignment(E->getArg(2));
unsigned Size;
if (BuiltinID == Hexagon::BI__builtin_HEXAGON_V6_vsubcarry) {
Size = 512;
ID = Intrinsic::hexagon_V6_vsubcarry;
} else {
Size = 1024;
ID = Intrinsic::hexagon_V6_vsubcarry_128B;
}
Dest = Builder.CreateBitCast(Dest,
llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
LoadInst *QLd = Builder.CreateLoad(Dest);
Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
llvm::Value *Base = Builder.CreateBitCast(EmitScalarExpr(E->getArg(2)),
Vprd->getType()->getPointerTo(0));
Builder.CreateAlignedStore(Vprd, Base, Dest.getAlignment());
return Builder.CreateExtractValue(Result, 0);
}
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
case Hexagon::BI__builtin_brev_ldub:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
case Hexagon::BI__builtin_brev_ldb:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
case Hexagon::BI__builtin_brev_lduh:
return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
case Hexagon::BI__builtin_brev_ldh:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
case Hexagon::BI__builtin_brev_ldw:
return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
case Hexagon::BI__builtin_brev_ldd:
return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
default:
break;
} // switch
return nullptr;
}
Index: projects/clang700-import/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/lib/Driver/Driver.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/lib/Driver/Driver.cpp (revision 340125)
@@ -1,4528 +1,4529 @@
//===--- Driver.cpp - Clang GCC Compatible Driver -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "clang/Driver/Driver.h"
#include "InputInfo.h"
#include "ToolChains/AMDGPU.h"
#include "ToolChains/AVR.h"
#include "ToolChains/Ananas.h"
#include "ToolChains/BareMetal.h"
#include "ToolChains/Clang.h"
#include "ToolChains/CloudABI.h"
#include "ToolChains/Contiki.h"
#include "ToolChains/CrossWindows.h"
#include "ToolChains/Cuda.h"
#include "ToolChains/Darwin.h"
#include "ToolChains/DragonFly.h"
#include "ToolChains/FreeBSD.h"
#include "ToolChains/Fuchsia.h"
#include "ToolChains/Gnu.h"
#include "ToolChains/HIP.h"
#include "ToolChains/Haiku.h"
#include "ToolChains/Hexagon.h"
#include "ToolChains/Lanai.h"
#include "ToolChains/Linux.h"
#include "ToolChains/MSVC.h"
#include "ToolChains/MinGW.h"
#include "ToolChains/Minix.h"
#include "ToolChains/MipsLinux.h"
#include "ToolChains/Myriad.h"
#include "ToolChains/NaCl.h"
#include "ToolChains/NetBSD.h"
#include "ToolChains/OpenBSD.h"
#include "ToolChains/PS4CPU.h"
#include "ToolChains/RISCV.h"
#include "ToolChains/Solaris.h"
#include "ToolChains/TCE.h"
#include "ToolChains/WebAssembly.h"
#include "ToolChains/XCore.h"
#include "clang/Basic/Version.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/StringSaver.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <memory>
#include <utility>
#if LLVM_ON_UNIX
#include <unistd.h> // getpid
#endif
using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
DiagnosticsEngine &Diags,
IntrusiveRefCntPtr<vfs::FileSystem> VFS)
: Opts(createDriverOptTable()), Diags(Diags), VFS(std::move(VFS)),
Mode(GCCMode), SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone),
LTOMode(LTOK_None), ClangExecutable(ClangExecutable),
SysRoot(DEFAULT_SYSROOT), DriverTitle("clang LLVM compiler"),
CCPrintOptionsFilename(nullptr), CCPrintHeadersFilename(nullptr),
CCLogDiagnosticsFilename(nullptr), CCCPrintBindings(false),
CCPrintOptions(false), CCPrintHeaders(false), CCLogDiagnostics(false),
CCGenDiagnostics(false), TargetTriple(TargetTriple),
CCCGenericGCCName(""), Saver(Alloc), CheckInputsExist(true),
CCCUsePCH(true), GenReproducer(false),
SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = vfs::getRealFileSystem();
Name = llvm::sys::path::filename(ClangExecutable);
Dir = llvm::sys::path::parent_path(ClangExecutable);
InstalledDir = Dir; // Provide a sensible default installed dir.
#if defined(CLANG_CONFIG_FILE_SYSTEM_DIR)
SystemConfigDir = CLANG_CONFIG_FILE_SYSTEM_DIR;
#endif
#if defined(CLANG_CONFIG_FILE_USER_DIR)
UserConfigDir = CLANG_CONFIG_FILE_USER_DIR;
#endif
// Compute the path to the resource directory.
StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
SmallString<128> P(Dir);
if (ClangResourceDir != "") {
llvm::sys::path::append(P, ClangResourceDir);
} else {
StringRef ClangLibdirSuffix(CLANG_LIBDIR_SUFFIX);
P = llvm::sys::path::parent_path(Dir);
llvm::sys::path::append(P, Twine("lib") + ClangLibdirSuffix, "clang",
CLANG_VERSION_STRING);
}
ResourceDir = P.str();
}
void Driver::ParseDriverMode(StringRef ProgramName,
ArrayRef<const char *> Args) {
if (ClangNameParts.isEmpty())
ClangNameParts = ToolChain::getTargetAndModeFromProgramName(ProgramName);
setDriverModeFromOption(ClangNameParts.DriverMode);
for (const char *ArgPtr : Args) {
// Ignore nullptrs; they are the response file's EOL markers.
if (ArgPtr == nullptr)
continue;
const StringRef Arg = ArgPtr;
setDriverModeFromOption(Arg);
}
}
void Driver::setDriverModeFromOption(StringRef Opt) {
const std::string OptName =
getOpts().getOption(options::OPT_driver_mode).getPrefixedName();
if (!Opt.startswith(OptName))
return;
StringRef Value = Opt.drop_front(OptName.size());
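// E.g. "--driver-mode=cl" yields Value == "cl" and selects CLMode below.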
if (auto M = llvm::StringSwitch<llvm::Optional<DriverMode>>(Value)
.Case("gcc", GCCMode)
.Case("g++", GXXMode)
.Case("cpp", CPPMode)
.Case("cl", CLMode)
.Default(None))
Mode = *M;
else
Diag(diag::err_drv_unsupported_option_argument) << OptName << Value;
}
InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings,
bool &ContainsError) {
llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
ContainsError = false;
unsigned IncludedFlagsBitmask;
unsigned ExcludedFlagsBitmask;
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
getIncludeExcludeOptionFlagMasks();
unsigned MissingArgIndex, MissingArgCount;
InputArgList Args =
getOpts().ParseArgs(ArgStrings, MissingArgIndex, MissingArgCount,
IncludedFlagsBitmask, ExcludedFlagsBitmask);
// Check for missing argument error.
if (MissingArgCount) {
Diag(diag::err_drv_missing_argument)
<< Args.getArgString(MissingArgIndex) << MissingArgCount;
ContainsError |=
Diags.getDiagnosticLevel(diag::err_drv_missing_argument,
SourceLocation()) > DiagnosticsEngine::Warning;
}
// Check for unsupported options.
for (const Arg *A : Args) {
if (A->getOption().hasFlag(options::Unsupported)) {
unsigned DiagID;
auto ArgString = A->getAsString(Args);
std::string Nearest;
if (getOpts().findNearest(
ArgString, Nearest, IncludedFlagsBitmask,
ExcludedFlagsBitmask | options::Unsupported) > 1) {
DiagID = diag::err_drv_unsupported_opt;
Diag(DiagID) << ArgString;
} else {
DiagID = diag::err_drv_unsupported_opt_with_suggestion;
Diag(DiagID) << ArgString << Nearest;
}
ContainsError |= Diags.getDiagnosticLevel(DiagID, SourceLocation()) >
DiagnosticsEngine::Warning;
continue;
}
// Warn about -mcpu= without an argument.
if (A->getOption().matches(options::OPT_mcpu_EQ) && A->containsValue("")) {
Diag(diag::warn_drv_empty_joined_argument) << A->getAsString(Args);
ContainsError |= Diags.getDiagnosticLevel(
diag::warn_drv_empty_joined_argument,
SourceLocation()) > DiagnosticsEngine::Warning;
}
}
for (const Arg *A : Args.filtered(options::OPT_UNKNOWN)) {
unsigned DiagID;
auto ArgString = A->getAsString(Args);
std::string Nearest;
if (getOpts().findNearest(
ArgString, Nearest, IncludedFlagsBitmask, ExcludedFlagsBitmask) > 1) {
DiagID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl
: diag::err_drv_unknown_argument;
Diags.Report(DiagID) << ArgString;
} else {
DiagID = IsCLMode() ? diag::warn_drv_unknown_argument_clang_cl_with_suggestion
: diag::err_drv_unknown_argument_with_suggestion;
Diags.Report(DiagID) << ArgString << Nearest;
}
ContainsError |= Diags.getDiagnosticLevel(DiagID, SourceLocation()) >
DiagnosticsEngine::Warning;
}
return Args;
}
// Determine which compilation mode we are in. We look for options which
// affect the phase, starting with the earliest phases, and record which
// option we used to determine the final phase.
phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
Arg **FinalPhaseArg) const {
Arg *PhaseArg = nullptr;
phases::ID FinalPhase;
// -{E,EP,P,M,MM} only run the preprocessor.
if (CCCIsCPP() || (PhaseArg = DAL.getLastArg(options::OPT_E)) ||
(PhaseArg = DAL.getLastArg(options::OPT__SLASH_EP)) ||
(PhaseArg = DAL.getLastArg(options::OPT_M, options::OPT_MM)) ||
(PhaseArg = DAL.getLastArg(options::OPT__SLASH_P))) {
FinalPhase = phases::Preprocess;
// --precompile only runs up to precompilation.
} else if ((PhaseArg = DAL.getLastArg(options::OPT__precompile))) {
FinalPhase = phases::Precompile;
// -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
(PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
(PhaseArg = DAL.getLastArg(options::OPT_verify_pch)) ||
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
(PhaseArg = DAL.getLastArg(options::OPT__analyze,
options::OPT__analyze_auto)) ||
(PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
FinalPhase = phases::Compile;
// -S only runs up to the backend.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_S))) {
FinalPhase = phases::Backend;
// -c compilation only runs up to the assembler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_c))) {
FinalPhase = phases::Assemble;
// Otherwise do everything.
} else
FinalPhase = phases::Link;
if (FinalPhaseArg)
*FinalPhaseArg = PhaseArg;
return FinalPhase;
}
static Arg *MakeInputArg(DerivedArgList &Args, OptTable &Opts,
StringRef Value, bool Claim = true) {
Arg *A = new Arg(Opts.getOption(options::OPT_INPUT), Value,
Args.getBaseArgs().MakeIndex(Value), Value.data());
Args.AddSynthesizedArg(A);
if (Claim)
A->claim();
return A;
}
DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
DerivedArgList *DAL = new DerivedArgList(Args);
bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
bool HasNodefaultlib = Args.hasArg(options::OPT_nodefaultlibs);
for (Arg *A : Args) {
// Unfortunately, we have to parse some forwarding options (-Xassembler,
// -Xlinker, -Xpreprocessor) because we either integrate their functionality
// (assembler and preprocessor), or bypass a previous driver ('collect2').
// Rewrite linker options, to replace --no-demangle with a custom internal
// option.
if ((A->getOption().matches(options::OPT_Wl_COMMA) ||
A->getOption().matches(options::OPT_Xlinker)) &&
A->containsValue("--no-demangle")) {
// Add the rewritten no-demangle argument.
DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_Xlinker__no_demangle));
// Add the remaining values as Xlinker arguments.
for (StringRef Val : A->getValues())
if (Val != "--no-demangle")
DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker), Val);
continue;
}
// Rewrite preprocessor options, to replace -Wp,-MD,FOO which is used by
// some build systems. We don't try to be complete here because we don't
// care to encourage this usage model.
if (A->getOption().matches(options::OPT_Wp_COMMA) &&
(A->getValue(0) == StringRef("-MD") ||
A->getValue(0) == StringRef("-MMD"))) {
// Rewrite to -MD/-MMD along with -MF.
if (A->getValue(0) == StringRef("-MD"))
DAL->AddFlagArg(A, Opts->getOption(options::OPT_MD));
else
DAL->AddFlagArg(A, Opts->getOption(options::OPT_MMD));
if (A->getNumValues() == 2)
DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF),
A->getValue(1));
continue;
}
// Rewrite reserved library names.
if (A->getOption().matches(options::OPT_l)) {
StringRef Value = A->getValue();
// Rewrite unless -nostdlib is present.
if (!HasNostdlib && !HasNodefaultlib && Value == "stdc++") {
DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_stdcxx));
continue;
}
// Rewrite unconditionally.
if (Value == "cc_kext") {
DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_reserved_lib_cckext));
continue;
}
}
// Pick up inputs via the -- option.
if (A->getOption().matches(options::OPT__DASH_DASH)) {
A->claim();
for (StringRef Val : A->getValues())
DAL->append(MakeInputArg(*DAL, *Opts, Val, false));
continue;
}
DAL->append(A);
}
// Enforce -static if -miamcu is present.
if (Args.hasFlag(options::OPT_miamcu, options::OPT_mno_iamcu, false))
DAL->AddFlagArg(0, Opts->getOption(options::OPT_static));
// Add a default value of -mlinker-version=, if one was given and the user
// didn't specify one.
#if defined(HOST_LINK_VERSION)
if (!Args.hasArg(options::OPT_mlinker_version_EQ) &&
strlen(HOST_LINK_VERSION) > 0) {
DAL->AddJoinedArg(0, Opts->getOption(options::OPT_mlinker_version_EQ),
HOST_LINK_VERSION);
DAL->getLastArg(options::OPT_mlinker_version_EQ)->claim();
}
#endif
return DAL;
}
/// Compute target triple from args.
///
/// This routine provides the logic to compute a target triple from various
/// args passed to the driver and the default triple string.
static llvm::Triple computeTargetTriple(const Driver &D,
StringRef TargetTriple,
const ArgList &Args,
StringRef DarwinArchName = "") {
// FIXME: Already done in Compilation *Driver::BuildCompilation
if (const Arg *A = Args.getLastArg(options::OPT_target))
TargetTriple = A->getValue();
llvm::Triple Target(llvm::Triple::normalize(TargetTriple));
// Handle Apple-specific options available here.
if (Target.isOSBinFormatMachO()) {
// If an explicit Darwin arch name is given, that trumps all.
if (!DarwinArchName.empty()) {
tools::darwin::setTripleTypeForMachOArchName(Target, DarwinArchName);
return Target;
}
// Handle the Darwin '-arch' flag.
if (Arg *A = Args.getLastArg(options::OPT_arch)) {
StringRef ArchName = A->getValue();
tools::darwin::setTripleTypeForMachOArchName(Target, ArchName);
}
}
// Handle pseudo-target flags '-mlittle-endian'/'-EL' and
// '-mbig-endian'/'-EB'.
if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
options::OPT_mbig_endian)) {
if (A->getOption().matches(options::OPT_mlittle_endian)) {
llvm::Triple LE = Target.getLittleEndianArchVariant();
if (LE.getArch() != llvm::Triple::UnknownArch)
Target = std::move(LE);
} else {
llvm::Triple BE = Target.getBigEndianArchVariant();
if (BE.getArch() != llvm::Triple::UnknownArch)
Target = std::move(BE);
}
}
// Skip further flag support on OSes which don't support '-m32' or '-m64'.
if (Target.getArch() == llvm::Triple::tce ||
Target.getOS() == llvm::Triple::Minix)
return Target;
// Handle pseudo-target flags '-m64', '-mx32', '-m32' and '-m16'.
Arg *A = Args.getLastArg(options::OPT_m64, options::OPT_mx32,
options::OPT_m32, options::OPT_m16);
if (A) {
llvm::Triple::ArchType AT = llvm::Triple::UnknownArch;
if (A->getOption().matches(options::OPT_m64)) {
AT = Target.get64BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
} else if (A->getOption().matches(options::OPT_mx32) &&
Target.get64BitArchVariant().getArch() == llvm::Triple::x86_64) {
AT = llvm::Triple::x86_64;
Target.setEnvironment(llvm::Triple::GNUX32);
} else if (A->getOption().matches(options::OPT_m32)) {
AT = Target.get32BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
} else if (A->getOption().matches(options::OPT_m16) &&
Target.get32BitArchVariant().getArch() == llvm::Triple::x86) {
AT = llvm::Triple::x86;
Target.setEnvironment(llvm::Triple::CODE16);
}
if (AT != llvm::Triple::UnknownArch && AT != Target.getArch())
Target.setArch(AT);
}
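// E.g. "--target=x86_64-linux-gnu -m32" yields an i386-*-linux-gnu triple
// here, since get32BitArchVariant() maps x86_64 to x86.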
// Handle -miamcu flag.
if (Args.hasFlag(options::OPT_miamcu, options::OPT_mno_iamcu, false)) {
if (Target.get32BitArchVariant().getArch() != llvm::Triple::x86)
D.Diag(diag::err_drv_unsupported_opt_for_target) << "-miamcu"
<< Target.str();
if (A && !A->getOption().matches(options::OPT_m32))
D.Diag(diag::err_drv_argument_not_allowed_with)
<< "-miamcu" << A->getBaseArg().getAsString(Args);
Target.setArch(llvm::Triple::x86);
Target.setArchName("i586");
Target.setEnvironment(llvm::Triple::UnknownEnvironment);
Target.setEnvironmentName("");
Target.setOS(llvm::Triple::ELFIAMCU);
Target.setVendor(llvm::Triple::UnknownVendor);
Target.setVendorName("intel");
}
return Target;
}
// Parse the LTO options and record the type of LTO compilation
// based on which -f(no-)?lto(=.*)? option occurs last.
void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
LTOMode = LTOK_None;
if (!Args.hasFlag(options::OPT_flto, options::OPT_flto_EQ,
options::OPT_fno_lto, false))
return;
StringRef LTOName("full");
const Arg *A = Args.getLastArg(options::OPT_flto_EQ);
if (A)
LTOName = A->getValue();
LTOMode = llvm::StringSwitch<LTOKind>(LTOName)
.Case("full", LTOK_Full)
.Case("thin", LTOK_Thin)
.Default(LTOK_Unknown);
if (LTOMode == LTOK_Unknown) {
assert(A);
Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName()
<< A->getValue();
}
}
/// Compute the desired OpenMP runtime from the flags provided.
Driver::OpenMPRuntimeKind Driver::getOpenMPRuntime(const ArgList &Args) const {
StringRef RuntimeName(CLANG_DEFAULT_OPENMP_RUNTIME);
const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ);
if (A)
RuntimeName = A->getValue();
auto RT = llvm::StringSwitch<OpenMPRuntimeKind>(RuntimeName)
.Case("libomp", OMPRT_OMP)
.Case("libgomp", OMPRT_GOMP)
.Case("libiomp5", OMPRT_IOMP5)
.Default(OMPRT_Unknown);
if (RT == OMPRT_Unknown) {
if (A)
Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << A->getValue();
else
// FIXME: We could use a nicer diagnostic here.
Diag(diag::err_drv_unsupported_opt) << "-fopenmp";
}
return RT;
}
void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
InputList &Inputs) {
//
// CUDA/HIP
//
// We need to generate a CUDA/HIP toolchain if any of the inputs has a CUDA
// or HIP type. However, mixed CUDA/HIP compilation is not supported.
bool IsCuda =
llvm::any_of(Inputs, [](std::pair<types::ID, const llvm::opt::Arg *> &I) {
return types::isCuda(I.first);
});
bool IsHIP =
llvm::any_of(Inputs,
[](std::pair<types::ID, const llvm::opt::Arg *> &I) {
return types::isHIP(I.first);
}) ||
C.getInputArgs().hasArg(options::OPT_hip_link);
if (IsCuda && IsHIP) {
Diag(clang::diag::err_drv_mix_cuda_hip);
return;
}
if (IsCuda) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
const llvm::Triple &HostTriple = HostTC->getTriple();
StringRef DeviceTripleStr;
auto OFK = Action::OFK_Cuda;
DeviceTripleStr =
HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda" : "nvptx-nvidia-cuda";
llvm::Triple CudaTriple(DeviceTripleStr);
// Use the CUDA and host triples as the key into the ToolChains map,
// because the device toolchain we create depends on both.
auto &CudaTC = ToolChains[CudaTriple.str() + "/" + HostTriple.str()];
if (!CudaTC) {
CudaTC = llvm::make_unique<toolchains::CudaToolChain>(
*this, CudaTriple, *HostTC, C.getInputArgs(), OFK);
}
C.addOffloadDeviceToolChain(CudaTC.get(), OFK);
} else if (IsHIP) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
const llvm::Triple &HostTriple = HostTC->getTriple();
StringRef DeviceTripleStr;
auto OFK = Action::OFK_HIP;
DeviceTripleStr = "amdgcn-amd-amdhsa";
llvm::Triple HIPTriple(DeviceTripleStr);
// Use the HIP and host triples as the key into the ToolChains map,
// because the device toolchain we create depends on both.
auto &HIPTC = ToolChains[HIPTriple.str() + "/" + HostTriple.str()];
if (!HIPTC) {
HIPTC = llvm::make_unique<toolchains::HIPToolChain>(
*this, HIPTriple, *HostTC, C.getInputArgs());
}
C.addOffloadDeviceToolChain(HIPTC.get(), OFK);
}
//
// OpenMP
//
// We need to generate an OpenMP toolchain if the user specified targets with
// the -fopenmp-targets option.
if (Arg *OpenMPTargets =
C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
if (OpenMPTargets->getNumValues()) {
// We expect that -fopenmp-targets is always used in conjunction with the
// option -fopenmp specifying a valid runtime with offloading support,
// i.e. libomp or libiomp.
bool HasValidOpenMPRuntime = C.getInputArgs().hasFlag(
options::OPT_fopenmp, options::OPT_fopenmp_EQ,
options::OPT_fno_openmp, false);
if (HasValidOpenMPRuntime) {
OpenMPRuntimeKind OpenMPKind = getOpenMPRuntime(C.getInputArgs());
HasValidOpenMPRuntime =
OpenMPKind == OMPRT_OMP || OpenMPKind == OMPRT_IOMP5;
}
if (HasValidOpenMPRuntime) {
llvm::StringMap<const char *> FoundNormalizedTriples;
for (const char *Val : OpenMPTargets->getValues()) {
llvm::Triple TT(Val);
std::string NormalizedName = TT.normalize();
// Make sure we don't have a duplicate triple.
auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
if (Duplicate != FoundNormalizedTriples.end()) {
Diag(clang::diag::warn_drv_omp_offload_target_duplicate)
<< Val << Duplicate->second;
continue;
}
// Store the current triple so that we can check for duplicates in the
// following iterations.
FoundNormalizedTriples[NormalizedName] = Val;
// If the specified target is invalid, emit a diagnostic.
if (TT.getArch() == llvm::Triple::UnknownArch)
Diag(clang::diag::err_drv_invalid_omp_target) << Val;
else {
const ToolChain *TC;
// CUDA toolchains have to be selected differently. They pair host
// and device in their implementation.
if (TT.isNVPTX()) {
const ToolChain *HostTC =
C.getSingleOffloadToolChain<Action::OFK_Host>();
assert(HostTC && "Host toolchain should be always defined.");
auto &CudaTC =
ToolChains[TT.str() + "/" + HostTC->getTriple().normalize()];
if (!CudaTC)
CudaTC = llvm::make_unique<toolchains::CudaToolChain>(
*this, TT, *HostTC, C.getInputArgs(), Action::OFK_OpenMP);
TC = CudaTC.get();
} else
TC = &getToolChain(C.getInputArgs(), TT);
C.addOffloadDeviceToolChain(TC, Action::OFK_OpenMP);
}
}
} else
Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
} else
Diag(clang::diag::warn_drv_empty_joined_argument)
<< OpenMPTargets->getAsString(C.getInputArgs());
}
//
// TODO: Add support for other offloading programming models here.
//
}
/// Searches the given directories for the specified file.
///
/// \param[out] FilePath File path, if the file was found.
/// \param[in] Dirs Directories used for the search.
/// \param[in] FileName Name of the file to search for.
/// \return True if file was found.
///
/// Looks for the file specified by FileName sequentially in the directories
/// specified by Dirs.
///
static bool searchForFile(SmallVectorImpl<char> &FilePath,
ArrayRef<std::string> Dirs,
StringRef FileName) {
SmallString<128> WPath;
for (const StringRef &Dir : Dirs) {
if (Dir.empty())
continue;
WPath.clear();
llvm::sys::path::append(WPath, Dir, FileName);
llvm::sys::path::native(WPath);
if (llvm::sys::fs::is_regular_file(WPath)) {
FilePath = std::move(WPath);
return true;
}
}
return false;
}
bool Driver::readConfigFile(StringRef FileName) {
// Try reading the given file.
SmallVector<const char *, 32> NewCfgArgs;
if (!llvm::cl::readConfigFile(FileName, Saver, NewCfgArgs)) {
Diag(diag::err_drv_cannot_read_config_file) << FileName;
return true;
}
// Read options from config file.
llvm::SmallString<128> CfgFileName(FileName);
llvm::sys::path::native(CfgFileName);
ConfigFile = CfgFileName.str();
bool ContainErrors;
CfgOptions = llvm::make_unique<InputArgList>(
ParseArgStrings(NewCfgArgs, ContainErrors));
if (ContainErrors) {
CfgOptions.reset();
return true;
}
if (CfgOptions->hasArg(options::OPT_config)) {
CfgOptions.reset();
Diag(diag::err_drv_nested_config_file);
return true;
}
// Claim all arguments that come from a configuration file so that the driver
// does not warn about any that are unused.
for (Arg *A : *CfgOptions)
A->claim();
return false;
}
bool Driver::loadConfigFile() {
std::string CfgFileName;
bool FileSpecifiedExplicitly = false;
// Process options that change search path for config files.
if (CLOptions) {
if (CLOptions->hasArg(options::OPT_config_system_dir_EQ)) {
SmallString<128> CfgDir;
CfgDir.append(
CLOptions->getLastArgValue(options::OPT_config_system_dir_EQ));
if (!CfgDir.empty()) {
if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
SystemConfigDir.clear();
else
SystemConfigDir = std::string(CfgDir.begin(), CfgDir.end());
}
}
if (CLOptions->hasArg(options::OPT_config_user_dir_EQ)) {
SmallString<128> CfgDir;
CfgDir.append(
CLOptions->getLastArgValue(options::OPT_config_user_dir_EQ));
if (!CfgDir.empty()) {
if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
UserConfigDir.clear();
else
UserConfigDir = std::string(CfgDir.begin(), CfgDir.end());
}
}
}
// First try to find config file specified in command line.
if (CLOptions) {
std::vector<std::string> ConfigFiles =
CLOptions->getAllArgValues(options::OPT_config);
if (ConfigFiles.size() > 1) {
Diag(diag::err_drv_duplicate_config);
return true;
}
if (!ConfigFiles.empty()) {
CfgFileName = ConfigFiles.front();
assert(!CfgFileName.empty());
// If argument contains directory separator, treat it as a path to
// configuration file.
if (llvm::sys::path::has_parent_path(CfgFileName)) {
SmallString<128> CfgFilePath;
if (llvm::sys::path::is_relative(CfgFileName))
llvm::sys::fs::current_path(CfgFilePath);
llvm::sys::path::append(CfgFilePath, CfgFileName);
if (!llvm::sys::fs::is_regular_file(CfgFilePath)) {
Diag(diag::err_drv_config_file_not_exist) << CfgFilePath;
return true;
}
return readConfigFile(CfgFilePath);
}
FileSpecifiedExplicitly = true;
}
}
// If config file is not specified explicitly, try to deduce configuration
// from executable name. For instance, an executable 'armv7l-clang' will
// search for config file 'armv7l-clang.cfg'.
if (CfgFileName.empty() && !ClangNameParts.TargetPrefix.empty())
CfgFileName = ClangNameParts.TargetPrefix + '-' + ClangNameParts.ModeSuffix;
if (CfgFileName.empty())
return false;
// Determine architecture part of the file name, if it is present.
StringRef CfgFileArch = CfgFileName;
size_t ArchPrefixLen = CfgFileArch.find('-');
if (ArchPrefixLen == StringRef::npos)
ArchPrefixLen = CfgFileArch.size();
llvm::Triple CfgTriple;
CfgFileArch = CfgFileArch.take_front(ArchPrefixLen);
CfgTriple = llvm::Triple(llvm::Triple::normalize(CfgFileArch));
if (CfgTriple.getArch() == llvm::Triple::ArchType::UnknownArch)
ArchPrefixLen = 0;
if (!StringRef(CfgFileName).endswith(".cfg"))
CfgFileName += ".cfg";
// If the config file name starts with an architecture name and command line
// options redefine the architecture (with options like -m32 -LE etc), try
// finding a new config file with that architecture.
SmallString<128> FixedConfigFile;
size_t FixedArchPrefixLen = 0;
if (ArchPrefixLen) {
// Get architecture name from config file name like 'i386.cfg' or
// 'armv7l-clang.cfg'.
// Check if command line options change the effective triple.
llvm::Triple EffectiveTriple = computeTargetTriple(*this,
CfgTriple.getTriple(), *CLOptions);
if (CfgTriple.getArch() != EffectiveTriple.getArch()) {
FixedConfigFile = EffectiveTriple.getArchName();
FixedArchPrefixLen = FixedConfigFile.size();
// Append the rest of original file name so that file name transforms
// like: i386-clang.cfg -> x86_64-clang.cfg.
if (ArchPrefixLen < CfgFileName.size())
FixedConfigFile += CfgFileName.substr(ArchPrefixLen);
}
}
// Prepare list of directories where config file is searched for.
SmallVector<std::string, 3> CfgFileSearchDirs;
CfgFileSearchDirs.push_back(UserConfigDir);
CfgFileSearchDirs.push_back(SystemConfigDir);
CfgFileSearchDirs.push_back(Dir);
// Try to find config file. First try file with corrected architecture.
llvm::SmallString<128> CfgFilePath;
if (!FixedConfigFile.empty()) {
if (searchForFile(CfgFilePath, CfgFileSearchDirs, FixedConfigFile))
return readConfigFile(CfgFilePath);
// If 'x86_64-clang.cfg' was not found, try 'x86_64.cfg'.
FixedConfigFile.resize(FixedArchPrefixLen);
FixedConfigFile.append(".cfg");
if (searchForFile(CfgFilePath, CfgFileSearchDirs, FixedConfigFile))
return readConfigFile(CfgFilePath);
}
// Then try original file name.
if (searchForFile(CfgFilePath, CfgFileSearchDirs, CfgFileName))
return readConfigFile(CfgFilePath);
// Finally try removing driver mode part: 'x86_64-clang.cfg' -> 'x86_64.cfg'.
if (!ClangNameParts.ModeSuffix.empty() &&
!ClangNameParts.TargetPrefix.empty()) {
CfgFileName.assign(ClangNameParts.TargetPrefix);
CfgFileName.append(".cfg");
if (searchForFile(CfgFilePath, CfgFileSearchDirs, CfgFileName))
return readConfigFile(CfgFilePath);
}
// Report error but only if config file was specified explicitly, by option
// --config. If it was deduced from executable name, it is not an error.
if (FileSpecifiedExplicitly) {
Diag(diag::err_drv_config_file_not_found) << CfgFileName;
for (const std::string &SearchDir : CfgFileSearchDirs)
if (!SearchDir.empty())
Diag(diag::note_drv_config_file_searched_in) << SearchDir;
return true;
}
return false;
}
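// Illustrative lookup (hypothetical executable name): a driver invoked as
// "i386-clang -m64" deduces "i386-clang.cfg"; since -m64 changes the
// effective architecture, the names tried are "x86_64-clang.cfg",
// "x86_64.cfg", "i386-clang.cfg" and finally "i386.cfg", each searched in
// UserConfigDir, SystemConfigDir and the driver directory in that order. A
// miss is not an error here because the name was deduced rather than given
// via --config.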
Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
llvm::PrettyStackTraceString CrashInfo("Compilation construction");
// FIXME: Handle environment options which affect driver behavior, somewhere
// (client?). GCC_EXEC_PREFIX, LPATH, CC_PRINT_OPTIONS.
if (Optional<std::string> CompilerPathValue =
llvm::sys::Process::GetEnv("COMPILER_PATH")) {
StringRef CompilerPath = *CompilerPathValue;
while (!CompilerPath.empty()) {
std::pair<StringRef, StringRef> Split =
CompilerPath.split(llvm::sys::EnvPathSeparator);
PrefixDirs.push_back(Split.first);
CompilerPath = Split.second;
}
}
// We look for the driver mode option early, because the mode can affect
// how other options are parsed.
ParseDriverMode(ClangExecutable, ArgList.slice(1));
// FIXME: What are we going to do with -V and -b?
// Arguments specified in command line.
bool ContainsError;
CLOptions = llvm::make_unique<InputArgList>(
ParseArgStrings(ArgList.slice(1), ContainsError));
// Try parsing configuration file.
if (!ContainsError)
ContainsError = loadConfigFile();
bool HasConfigFile = !ContainsError && (CfgOptions.get() != nullptr);
// All arguments, from both config file and command line.
InputArgList Args = std::move(HasConfigFile ? std::move(*CfgOptions)
: std::move(*CLOptions));
if (HasConfigFile)
for (auto *Opt : *CLOptions) {
if (Opt->getOption().matches(options::OPT_config))
continue;
unsigned Index = Args.MakeIndex(Opt->getSpelling());
const Arg *BaseArg = &Opt->getBaseArg();
if (BaseArg == Opt)
BaseArg = nullptr;
Arg *Copy = new llvm::opt::Arg(Opt->getOption(), Opt->getSpelling(),
Index, BaseArg);
Copy->getValues() = Opt->getValues();
if (Opt->isClaimed())
Copy->claim();
Args.append(Copy);
}
// FIXME: This stuff needs to go into the Compilation, not the driver.
bool CCCPrintPhases;
// Silence driver warnings if requested
Diags.setIgnoreAllWarnings(Args.hasArg(options::OPT_w));
// -no-canonical-prefixes is used very early in main.
Args.ClaimAllArgs(options::OPT_no_canonical_prefixes);
// Ignore -pipe.
Args.ClaimAllArgs(options::OPT_pipe);
// Extract -ccc args.
//
// FIXME: We need to figure out where this behavior should live. Most of it
// should be outside in the client; the parts that aren't should have proper
// options, either by introducing new ones or by overloading gcc ones like -V
// or -b.
CCCPrintPhases = Args.hasArg(options::OPT_ccc_print_phases);
CCCPrintBindings = Args.hasArg(options::OPT_ccc_print_bindings);
if (const Arg *A = Args.getLastArg(options::OPT_ccc_gcc_name))
CCCGenericGCCName = A->getValue();
CCCUsePCH =
Args.hasFlag(options::OPT_ccc_pch_is_pch, options::OPT_ccc_pch_is_pth);
GenReproducer = Args.hasFlag(options::OPT_gen_reproducer,
options::OPT_fno_crash_diagnostics,
!!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"));
// FIXME: TargetTriple is used by the target-prefixed calls to as/ld
// and getToolChain is const.
if (IsCLMode()) {
// clang-cl targets MSVC-style Win32.
llvm::Triple T(TargetTriple);
T.setOS(llvm::Triple::Win32);
T.setVendor(llvm::Triple::PC);
T.setEnvironment(llvm::Triple::MSVC);
T.setObjectFormat(llvm::Triple::COFF);
TargetTriple = T.str();
}
if (const Arg *A = Args.getLastArg(options::OPT_target))
TargetTriple = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_ccc_install_dir))
Dir = InstalledDir = A->getValue();
for (const Arg *A : Args.filtered(options::OPT_B)) {
A->claim();
PrefixDirs.push_back(A->getValue(0));
}
if (const Arg *A = Args.getLastArg(options::OPT__sysroot_EQ))
SysRoot = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT__dyld_prefix_EQ))
DyldPrefix = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_resource_dir))
ResourceDir = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_save_temps_EQ)) {
SaveTemps = llvm::StringSwitch<SaveTempsMode>(A->getValue())
.Case("cwd", SaveTempsCwd)
.Case("obj", SaveTempsObj)
.Default(SaveTempsCwd);
}
setLTOMode(Args);
// Process -fembed-bitcode= flags.
if (Arg *A = Args.getLastArg(options::OPT_fembed_bitcode_EQ)) {
StringRef Name = A->getValue();
unsigned Model = llvm::StringSwitch<unsigned>(Name)
.Case("off", EmbedNone)
.Case("all", EmbedBitcode)
.Case("bitcode", EmbedBitcode)
.Case("marker", EmbedMarker)
.Default(~0U);
if (Model == ~0U) {
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
<< Name;
} else
BitcodeEmbed = static_cast<BitcodeEmbedMode>(Model);
}
std::unique_ptr<llvm::opt::InputArgList> UArgs =
llvm::make_unique<InputArgList>(std::move(Args));
// Perform the default argument translations.
DerivedArgList *TranslatedArgs = TranslateInputArgs(*UArgs);
// Owned by the host.
const ToolChain &TC = getToolChain(
*UArgs, computeTargetTriple(*this, TargetTriple, *UArgs));
// The compilation takes ownership of Args.
Compilation *C = new Compilation(*this, TC, UArgs.release(), TranslatedArgs,
ContainsError);
if (!HandleImmediateArgs(*C))
return C;
// Construct the list of inputs.
InputList Inputs;
BuildInputs(C->getDefaultToolChain(), *TranslatedArgs, Inputs);
// Populate the tool chains for the offloading devices, if any.
CreateOffloadingDeviceToolChains(*C, Inputs);
// Construct the list of abstract actions to perform for this compilation. On
// MachO targets this uses the driver-driver and universal actions.
if (TC.getTriple().isOSBinFormatMachO())
BuildUniversalActions(*C, C->getDefaultToolChain(), Inputs);
else
BuildActions(*C, C->getArgs(), Inputs, C->getActions());
if (CCCPrintPhases) {
PrintActions(*C);
return C;
}
BuildJobs(*C);
return C;
}
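// Illustrative merge semantics (hypothetical flags): with "-O1" in the config
// file and "clang --config test.cfg -O2 foo.c" on the command line, the
// merged list holds the config-file arguments first and copies of the
// command-line arguments after them, so last-arg-wins queries see -O2; the
// --config option itself is never copied into the merged list.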
static void printArgList(raw_ostream &OS, const llvm::opt::ArgList &Args) {
llvm::opt::ArgStringList ASL;
for (const auto *A : Args)
A->render(Args, ASL);
for (auto I = ASL.begin(), E = ASL.end(); I != E; ++I) {
if (I != ASL.begin())
OS << ' ';
Command::printArg(OS, *I, true);
}
OS << '\n';
}
bool Driver::getCrashDiagnosticFile(StringRef ReproCrashFilename,
SmallString<128> &CrashDiagDir) {
using namespace llvm::sys;
assert(llvm::Triple(llvm::sys::getProcessTriple()).isOSDarwin() &&
"Only knows about .crash files on Darwin");
// The .crash file can be found at ~/Library/Logs/DiagnosticReports/
// (or /Library/Logs/DiagnosticReports for root) and has the filename pattern
// clang-<VERSION>_<YYYY-MM-DD-HHMMSS>_<hostname>.crash.
path::home_directory(CrashDiagDir);
if (CrashDiagDir.startswith("/var/root"))
CrashDiagDir = "/";
path::append(CrashDiagDir, "Library/Logs/DiagnosticReports");
int PID =
#if LLVM_ON_UNIX
getpid();
#else
0;
#endif
std::error_code EC;
fs::file_status FileStatus;
TimePoint<> LastAccessTime;
SmallString<128> CrashFilePath;
// Lookup the .crash files and get the one generated by a subprocess spawned
// by this driver invocation.
for (fs::directory_iterator File(CrashDiagDir, EC), FileEnd;
File != FileEnd && !EC; File.increment(EC)) {
StringRef FileName = path::filename(File->path());
if (!FileName.startswith(Name))
continue;
if (fs::status(File->path(), FileStatus))
continue;
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CrashFile =
llvm::MemoryBuffer::getFile(File->path());
if (!CrashFile)
continue;
// The first line should start with "Process:", otherwise this isn't a real
// .crash file.
StringRef Data = CrashFile.get()->getBuffer();
if (!Data.startswith("Process:"))
continue;
// Parse parent process pid line, e.g: "Parent Process: clang-4.0 [79141]"
size_t ParentProcPos = Data.find("Parent Process:");
if (ParentProcPos == StringRef::npos)
continue;
size_t LineEnd = Data.find_first_of("\n", ParentProcPos);
if (LineEnd == StringRef::npos)
continue;
StringRef ParentProcess = Data.slice(ParentProcPos+15, LineEnd).trim();
int OpenBracket = -1, CloseBracket = -1;
for (size_t i = 0, e = ParentProcess.size(); i < e; ++i) {
if (ParentProcess[i] == '[')
OpenBracket = i;
if (ParentProcess[i] == ']')
CloseBracket = i;
}
// Extract the parent process PID from the .crash file and check whether
// it matches this driver invocation pid.
int CrashPID;
if (OpenBracket < 0 || CloseBracket < 0 ||
ParentProcess.slice(OpenBracket + 1, CloseBracket)
.getAsInteger(10, CrashPID) || CrashPID != PID) {
continue;
}
// Found a .crash file matching the driver pid. To avoid getting an older
// and misleading crash file, continue looking for the most recent.
// FIXME: the driver can dispatch multiple cc1 invocations, leading to
// multiple crashes pointing to the same parent process. Since the driver
// does not collect pid information for the dispatched invocation there's
// currently no way to distinguish among them.
const auto FileAccessTime = FileStatus.getLastModificationTime();
if (FileAccessTime > LastAccessTime) {
CrashFilePath.assign(File->path());
LastAccessTime = FileAccessTime;
}
}
// If found, copy it over to the location of other reproducer files.
if (!CrashFilePath.empty()) {
EC = fs::copy_file(CrashFilePath, ReproCrashFilename);
if (EC)
return false;
return true;
}
return false;
}
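// Illustrative .crash header (values hypothetical):
//   Process:         clang-7.0 [81234]
//   Parent Process:  clang-7.0 [79141]
// The loop above extracts "79141" from the bracketed parent-process field,
// keeps only files whose parent pid matches this driver invocation, and
// prefers the most recently modified match.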
// When clang crashes, produce diagnostic information including the fully
// preprocessed source file(s). Request that the developer attach the
// diagnostic information to a bug report.
void Driver::generateCompilationDiagnostics(
Compilation &C, const Command &FailingCommand,
StringRef AdditionalInformation, CompilationDiagnosticReport *Report) {
if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
return;
// Don't try to generate diagnostics for link or dsymutil jobs.
if (FailingCommand.getCreator().isLinkJob() ||
FailingCommand.getCreator().isDsymutilJob())
return;
// Print the version of the compiler.
PrintVersion(C, llvm::errs());
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "PLEASE submit a bug report to " BUG_REPORT_URL " and include the "
"crash backtrace, preprocessed source, and associated run script.";
// Suppress driver output and emit preprocessor output to temp file.
Mode = CPPMode;
CCGenDiagnostics = true;
// Save the original job command(s).
Command Cmd = FailingCommand;
// Keep track of whether we produce any errors while trying to produce
// preprocessed sources.
DiagnosticErrorTrap Trap(Diags);
// Suppress tool output.
C.initCompilationForDiagnostics();
// Construct the list of inputs.
InputList Inputs;
BuildInputs(C.getDefaultToolChain(), C.getArgs(), Inputs);
for (InputList::iterator it = Inputs.begin(), ie = Inputs.end(); it != ie;) {
bool IgnoreInput = false;
// Ignore input from stdin or any inputs that cannot be preprocessed.
// Check type first as not all linker inputs have a value.
if (types::getPreprocessedType(it->first) == types::TY_INVALID) {
IgnoreInput = true;
} else if (!strcmp(it->second->getValue(), "-")) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s) - "
"ignoring input from stdin.";
IgnoreInput = true;
}
if (IgnoreInput) {
it = Inputs.erase(it);
ie = Inputs.end();
} else {
++it;
}
}
if (Inputs.empty()) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s) - "
"no preprocessable inputs.";
return;
}
// Don't attempt to generate preprocessed files if multiple -arch options are
// used, unless they're all duplicates.
llvm::StringSet<> ArchNames;
for (const Arg *A : C.getArgs()) {
if (A->getOption().matches(options::OPT_arch)) {
StringRef ArchName = A->getValue();
ArchNames.insert(ArchName);
}
}
if (ArchNames.size() > 1) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s) - cannot generate "
"preprocessed source with multiple -arch options.";
return;
}
// Construct the list of abstract actions to perform for this compilation. On
// Darwin OSes this uses the driver-driver and builds universal actions.
const ToolChain &TC = C.getDefaultToolChain();
if (TC.getTriple().isOSBinFormatMachO())
BuildUniversalActions(C, TC, Inputs);
else
BuildActions(C, C.getArgs(), Inputs, C.getActions());
BuildJobs(C);
// If there were errors building the compilation, quit now.
if (Trap.hasErrorOccurred()) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s).";
return;
}
// Generate preprocessed output.
SmallVector<std::pair<int, const Command *>, 4> FailingCommands;
C.ExecuteJobs(C.getJobs(), FailingCommands);
// If any of the preprocessing commands failed, clean up and exit.
if (!FailingCommands.empty()) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s).";
return;
}
const ArgStringList &TempFiles = C.getTempFiles();
if (TempFiles.empty()) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating preprocessed source(s).";
return;
}
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "\n********************\n\n"
"PLEASE ATTACH THE FOLLOWING FILES TO THE BUG REPORT:\n"
"Preprocessed source(s) and associated run script(s) are located at:";
SmallString<128> VFS;
SmallString<128> ReproCrashFilename;
for (const char *TempFile : TempFiles) {
Diag(clang::diag::note_drv_command_failed_diag_msg) << TempFile;
if (Report)
Report->TemporaryFiles.push_back(TempFile);
if (ReproCrashFilename.empty()) {
ReproCrashFilename = TempFile;
llvm::sys::path::replace_extension(ReproCrashFilename, ".crash");
}
if (StringRef(TempFile).endswith(".cache")) {
// In some cases (modules) we'll dump extra data to help with reproducing
// the crash into a directory next to the output.
VFS = llvm::sys::path::filename(TempFile);
llvm::sys::path::append(VFS, "vfs", "vfs.yaml");
}
}
// Assume associated files are based off of the first temporary file.
CrashReportInfo CrashInfo(TempFiles[0], VFS);
llvm::SmallString<128> Script(CrashInfo.Filename);
llvm::sys::path::replace_extension(Script, "sh");
std::error_code EC;
llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::CD_CreateNew);
if (EC) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating run script: " << Script << " " << EC.message();
} else {
ScriptOS << "# Crash reproducer for " << getClangFullVersion() << "\n"
<< "# Driver args: ";
printArgList(ScriptOS, C.getInputArgs());
ScriptOS << "# Original command: ";
Cmd.Print(ScriptOS, "\n", /*Quote=*/true);
Cmd.Print(ScriptOS, "\n", /*Quote=*/true, &CrashInfo);
if (!AdditionalInformation.empty())
ScriptOS << "\n# Additional information: " << AdditionalInformation
<< "\n";
if (Report)
Report->TemporaryFiles.push_back(Script.str());
Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
}
// On darwin, provide information about the .crash diagnostic report.
if (llvm::Triple(llvm::sys::getProcessTriple()).isOSDarwin()) {
SmallString<128> CrashDiagDir;
if (getCrashDiagnosticFile(ReproCrashFilename, CrashDiagDir)) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< ReproCrashFilename.str();
} else { // Suggest a directory for the user to look for .crash files.
llvm::sys::path::append(CrashDiagDir, Name);
CrashDiagDir += "_<YYYY-MM-DD-HHMMSS>_<hostname>.crash";
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Crash backtrace is located in";
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< CrashDiagDir.str();
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "(choose the .crash file that corresponds to your crash)";
}
}
for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file,
options::OPT_frewrite_map_file_EQ))
Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "\n\n********************";
}
void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
// Since commandLineFitsWithinSystemLimits() may underestimate the system's
// capacity if the tool does not support response files, there is a chance
// that things will just work without a response file, so we silently skip it.
if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(), Cmd.getArguments()))
return;
std::string TmpName = GetTemporaryPath("response", "txt");
Cmd.setResponseFile(C.addTempFile(C.getArgs().MakeArgString(TmpName)));
}
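// Illustrative effect (hypothetical command): if a link line is longer than
// the system argument limit and the linker supports response files, the
// arguments are diverted into a temporary file and the job is executed
// roughly as "ld @/tmp/response-xxxxxx.txt" instead of inline.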
int Driver::ExecuteCompilation(
Compilation &C,
SmallVectorImpl<std::pair<int, const Command *>> &FailingCommands) {
// Just print if -### was present.
if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
C.getJobs().Print(llvm::errs(), "\n", true);
return 0;
}
// If there were errors building the compilation, quit now.
if (Diags.hasErrorOccurred())
return 1;
// Set up response file names for each command, if necessary
for (auto &Job : C.getJobs())
setUpResponseFiles(C, Job);
C.ExecuteJobs(C.getJobs(), FailingCommands);
// If the command succeeded, we are done.
if (FailingCommands.empty())
return 0;
// Otherwise, remove result files and print extra information about abnormal
// failures.
for (const auto &CmdPair : FailingCommands) {
int Res = CmdPair.first;
const Command *FailingCommand = CmdPair.second;
// Remove result files if we're not saving temps.
if (!isSaveTempsEnabled()) {
const JobAction *JA = cast<JobAction>(&FailingCommand->getSource());
C.CleanupFileMap(C.getResultFiles(), JA, true);
// Failure result files are valid unless we crashed.
if (Res < 0)
C.CleanupFileMap(C.getFailureResultFiles(), JA, true);
}
// Print extra information about abnormal failures, if possible.
//
// This is ad-hoc, but we don't want to be excessively noisy. If the result
// status was 1, assume the command failed normally. In particular, if it
// was the compiler then assume it gave a reasonable error code. Failures
// in other tools are less common, and they generally have worse
// diagnostics, so always print the diagnostic there.
const Tool &FailingTool = FailingCommand->getCreator();
if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
// FIXME: See FIXME above regarding result code interpretation.
if (Res < 0)
Diag(clang::diag::err_drv_command_signalled)
<< FailingTool.getShortName();
else
Diag(clang::diag::err_drv_command_failed) << FailingTool.getShortName()
<< Res;
}
}
return 0;
}
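// Illustrative failure handling: a compiler job exiting with status 1
// produces no extra driver output because clang's own diagnostics are
// trusted, whereas a negative status (terminated by a signal) always emits
// err_drv_command_signalled with the failing tool's short name.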
void Driver::PrintHelp(bool ShowHidden) const {
unsigned IncludedFlagsBitmask;
unsigned ExcludedFlagsBitmask;
std::tie(IncludedFlagsBitmask, ExcludedFlagsBitmask) =
getIncludeExcludeOptionFlagMasks();
ExcludedFlagsBitmask |= options::NoDriverOption;
if (!ShowHidden)
ExcludedFlagsBitmask |= HelpHidden;
getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
IncludedFlagsBitmask, ExcludedFlagsBitmask,
/*ShowAllAliases=*/false);
}
void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
// FIXME: The following handlers should use a callback mechanism, we don't
// know what the client would like to do.
OS << getClangFullVersion() << '\n';
const ToolChain &TC = C.getDefaultToolChain();
OS << "Target: " << TC.getTripleString() << '\n';
// Print the threading model.
if (Arg *A = C.getArgs().getLastArg(options::OPT_mthread_model)) {
// Don't print if the ToolChain would have barfed on it already
if (TC.isThreadModelSupported(A->getValue()))
OS << "Thread model: " << A->getValue();
} else
OS << "Thread model: " << TC.getThreadModel();
OS << '\n';
// Print out the install directory.
OS << "InstalledDir: " << InstalledDir << '\n';
// If configuration file was used, print its path.
if (!ConfigFile.empty())
OS << "Configuration file: " << ConfigFile << '\n';
}
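// Illustrative -v header (values hypothetical):
//   clang version 7.0.0 ...
//   Target: x86_64-unknown-freebsd12.0
//   Thread model: posix
//   InstalledDir: /usr/bin
// with a "Configuration file:" line appended only when one was used.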
/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
/// option.
static void PrintDiagnosticCategories(raw_ostream &OS) {
// Skip the empty category.
for (unsigned i = 1, max = DiagnosticIDs::getNumberOfCategories(); i != max;
++i)
OS << i << ',' << DiagnosticIDs::getCategoryNameFromID(i) << '\n';
}
void Driver::HandleAutocompletions(StringRef PassedFlags) const {
if (PassedFlags == "")
return;
// Print out all options that start with a given argument. This is used for
// shell autocompletion.
std::vector<std::string> SuggestedCompletions;
std::vector<std::string> Flags;
unsigned short DisableFlags =
options::NoDriverOption | options::Unsupported | options::Ignored;
// Parse PassedFlags by "," as all the command-line flags are passed to this
// function separated by ","
StringRef TargetFlags = PassedFlags;
while (TargetFlags != "") {
StringRef CurFlag;
std::tie(CurFlag, TargetFlags) = TargetFlags.split(",");
Flags.push_back(std::string(CurFlag));
}
// We want to show cc1-only options only when clang is invoked with -cc1 or
// -Xclang.
if (std::find(Flags.begin(), Flags.end(), "-Xclang") != Flags.end() ||
std::find(Flags.begin(), Flags.end(), "-cc1") != Flags.end())
DisableFlags &= ~options::NoDriverOption;
StringRef Cur;
Cur = Flags.at(Flags.size() - 1);
StringRef Prev;
if (Flags.size() >= 2) {
Prev = Flags.at(Flags.size() - 2);
SuggestedCompletions = Opts->suggestValueCompletions(Prev, Cur);
}
if (SuggestedCompletions.empty())
SuggestedCompletions = Opts->suggestValueCompletions(Cur, "");
if (SuggestedCompletions.empty()) {
// If the flag is in the form of "--autocomplete=-foo",
// we were requested to print out all option names that start with "-foo".
// For example, "--autocomplete=-fsyn" is expanded to "-fsyntax-only".
SuggestedCompletions = Opts->findByPrefix(Cur, DisableFlags);
// We have to query the -W flags manually as they're not in the OptTable.
// TODO: Find a good way to add them to OptTable instead and then remove
// this code.
for (StringRef S : DiagnosticIDs::getDiagnosticFlags())
if (S.startswith(Cur))
SuggestedCompletions.push_back(S);
}
// Sort the autocomplete candidates so that shells print them out in a
// deterministic order. We could sort in any way, but we chose
// case-insensitive sorting for consistency with the -help option
// which prints out options in case-insensitive alphabetical order.
llvm::sort(SuggestedCompletions.begin(), SuggestedCompletions.end(),
[](StringRef A, StringRef B) {
if (int X = A.compare_lower(B))
return X < 0;
return A.compare(B) > 0;
});
llvm::outs() << llvm::join(SuggestedCompletions, "\n") << '\n';
}
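// Illustrative queries (outputs abridged): "--autocomplete=-fsyn" expands to
// option names with that prefix such as "-fsyntax-only", while a
// comma-separated request like "--autocomplete=-std=,c" asks for value
// completions of -std= beginning with "c"; cc1-only options are offered only
// when "-cc1" or "-Xclang" appears among the passed flags.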
bool Driver::HandleImmediateArgs(const Compilation &C) {
// The order these options are handled in gcc is all over the place, but we
// don't expect inconsistencies w.r.t. that to matter in practice.
if (C.getArgs().hasArg(options::OPT_dumpmachine)) {
llvm::outs() << C.getDefaultToolChain().getTripleString() << '\n';
return false;
}
if (C.getArgs().hasArg(options::OPT_dumpversion)) {
// Since -dumpversion is only implemented for pedantic GCC compatibility, we
// return an answer which matches our definition of __VERSION__.
//
// If we want to return a more correct answer some day, then we should
// introduce a non-pedantically GCC compatible mode to Clang in which we
// provide sensible definitions for -dumpversion, __VERSION__, etc.
llvm::outs() << "4.2.1\n";
return false;
}
if (C.getArgs().hasArg(options::OPT__print_diagnostic_categories)) {
PrintDiagnosticCategories(llvm::outs());
return false;
}
if (C.getArgs().hasArg(options::OPT_help) ||
C.getArgs().hasArg(options::OPT__help_hidden)) {
PrintHelp(C.getArgs().hasArg(options::OPT__help_hidden));
return false;
}
if (C.getArgs().hasArg(options::OPT__version)) {
// Follow gcc behavior and use stdout for --version and stderr for -v.
PrintVersion(C, llvm::outs());
return false;
}
if (C.getArgs().hasArg(options::OPT_v) ||
C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
PrintVersion(C, llvm::errs());
SuppressMissingInputWarning = true;
}
if (C.getArgs().hasArg(options::OPT_v)) {
if (!SystemConfigDir.empty())
llvm::errs() << "System configuration file directory: "
<< SystemConfigDir << "\n";
if (!UserConfigDir.empty())
llvm::errs() << "User configuration file directory: "
<< UserConfigDir << "\n";
}
const ToolChain &TC = C.getDefaultToolChain();
if (C.getArgs().hasArg(options::OPT_v))
TC.printVerboseInfo(llvm::errs());
if (C.getArgs().hasArg(options::OPT_print_resource_dir)) {
llvm::outs() << ResourceDir << '\n';
return false;
}
if (C.getArgs().hasArg(options::OPT_print_search_dirs)) {
llvm::outs() << "programs: =";
bool separator = false;
for (const std::string &Path : TC.getProgramPaths()) {
if (separator)
llvm::outs() << ':';
llvm::outs() << Path;
separator = true;
}
llvm::outs() << "\n";
llvm::outs() << "libraries: =" << ResourceDir;
StringRef sysroot = C.getSysRoot();
for (const std::string &Path : TC.getFilePaths()) {
// Always print a separator. ResourceDir was the first item shown.
llvm::outs() << ':';
// Interpretation of leading '=' is needed only for NetBSD.
if (Path[0] == '=')
llvm::outs() << sysroot << Path.substr(1);
else
llvm::outs() << Path;
}
llvm::outs() << "\n";
return false;
}
// FIXME: The following handlers should use a callback mechanism, we don't
// know what the client would like to do.
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
llvm::outs() << GetFilePath(A->getValue(), TC) << "\n";
return false;
}
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_prog_name_EQ)) {
StringRef ProgName = A->getValue();
// Null program name cannot have a path.
if (! ProgName.empty())
llvm::outs() << GetProgramPath(ProgName, TC);
llvm::outs() << "\n";
return false;
}
if (Arg *A = C.getArgs().getLastArg(options::OPT_autocomplete)) {
StringRef PassedFlags = A->getValue();
HandleAutocompletions(PassedFlags);
return false;
}
if (C.getArgs().hasArg(options::OPT_print_libgcc_file_name)) {
ToolChain::RuntimeLibType RLT = TC.GetRuntimeLibType(C.getArgs());
const llvm::Triple Triple(TC.ComputeEffectiveClangTriple(C.getArgs()));
RegisterEffectiveTriple TripleRAII(TC, Triple);
switch (RLT) {
case ToolChain::RLT_CompilerRT:
llvm::outs() << TC.getCompilerRT(C.getArgs(), "builtins") << "\n";
break;
case ToolChain::RLT_Libgcc:
llvm::outs() << GetFilePath("libgcc.a", TC) << "\n";
break;
}
return false;
}
if (C.getArgs().hasArg(options::OPT_print_multi_lib)) {
for (const Multilib &Multilib : TC.getMultilibs())
llvm::outs() << Multilib << "\n";
return false;
}
if (C.getArgs().hasArg(options::OPT_print_multi_directory)) {
for (const Multilib &Multilib : TC.getMultilibs()) {
if (Multilib.gccSuffix().empty())
llvm::outs() << ".\n";
else {
StringRef Suffix(Multilib.gccSuffix());
assert(Suffix.front() == '/');
llvm::outs() << Suffix.substr(1) << "\n";
}
}
return false;
}
return true;
}
// Display an action graph human-readably. Action A is the "sink" node
// and latest-occurring action. Traversal is in pre-order, visiting the
// inputs to each action before printing the action itself.
static unsigned PrintActions1(const Compilation &C, Action *A,
std::map<Action *, unsigned> &Ids) {
if (Ids.count(A)) // A was already visited.
return Ids[A];
std::string str;
llvm::raw_string_ostream os(str);
os << Action::getClassName(A->getKind()) << ", ";
if (InputAction *IA = dyn_cast<InputAction>(A)) {
os << "\"" << IA->getInputArg().getValue() << "\"";
} else if (BindArchAction *BIA = dyn_cast<BindArchAction>(A)) {
os << '"' << BIA->getArchName() << '"' << ", {"
<< PrintActions1(C, *BIA->input_begin(), Ids) << "}";
} else if (OffloadAction *OA = dyn_cast<OffloadAction>(A)) {
bool IsFirst = true;
OA->doOnEachDependence(
[&](Action *A, const ToolChain *TC, const char *BoundArch) {
// E.g. for two CUDA device dependences whose bound arch is sm_20 and
// sm_35 this will generate:
// "cuda-device" (nvptx64-nvidia-cuda:sm_20) {#ID}, "cuda-device"
// (nvptx64-nvidia-cuda:sm_35) {#ID}
if (!IsFirst)
os << ", ";
os << '"';
if (TC)
os << A->getOffloadingKindPrefix();
else
os << "host";
os << " (";
os << TC->getTriple().normalize();
if (BoundArch)
os << ":" << BoundArch;
os << ")";
os << '"';
os << " {" << PrintActions1(C, A, Ids) << "}";
IsFirst = false;
});
} else {
const ActionList *AL = &A->getInputs();
if (AL->size()) {
const char *Prefix = "{";
for (Action *PreRequisite : *AL) {
os << Prefix << PrintActions1(C, PreRequisite, Ids);
Prefix = ", ";
}
os << "}";
} else
os << "{}";
}
// Append offload info for all options other than the offloading action
// itself (e.g. (cuda-device, sm_20) or (cuda-host)).
std::string offload_str;
llvm::raw_string_ostream offload_os(offload_str);
if (!isa<OffloadAction>(A)) {
auto S = A->getOffloadingKindPrefix();
if (!S.empty()) {
offload_os << ", (" << S;
if (A->getOffloadingArch())
offload_os << ", " << A->getOffloadingArch();
offload_os << ")";
}
}
unsigned Id = Ids.size();
Ids[A] = Id;
llvm::errs() << Id << ": " << os.str() << ", "
<< types::getTypeName(A->getType()) << offload_os.str() << "\n";
return Id;
}
// Print the action graphs in a compilation C.
// For example "clang -c file1.c file2.c" is composed of two subgraphs.
void Driver::PrintActions(const Compilation &C) const {
std::map<Action *, unsigned> Ids;
for (Action *A : C.getActions())
PrintActions1(C, A, Ids);
}
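// Illustrative -ccc-print-phases output for "clang -c foo.c" (typical
// five-phase graph; ids are assigned in visit order):
//   0: input, "foo.c", c
//   1: preprocessor, {0}, cpp-output
//   2: compiler, {1}, ir
//   3: backend, {2}, assembler
//   4: assembler, {3}, object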
/// Check whether the given input tree contains any compilation or
/// assembly actions.
static bool ContainsCompileOrAssembleAction(const Action *A) {
if (isa<CompileJobAction>(A) || isa<BackendJobAction>(A) ||
isa<AssembleJobAction>(A))
return true;
for (const Action *Input : A->inputs())
if (ContainsCompileOrAssembleAction(Input))
return true;
return false;
}
void Driver::BuildUniversalActions(Compilation &C, const ToolChain &TC,
const InputList &BAInputs) const {
DerivedArgList &Args = C.getArgs();
ActionList &Actions = C.getActions();
llvm::PrettyStackTraceString CrashInfo("Building universal build actions");
// Collect the list of architectures. Duplicates are allowed, but should only
// be handled once (in the order seen).
llvm::StringSet<> ArchNames;
SmallVector<const char *, 4> Archs;
for (Arg *A : Args) {
if (A->getOption().matches(options::OPT_arch)) {
// Validate the option here; we don't save the type here because its
// particular spelling may participate in other driver choices.
llvm::Triple::ArchType Arch =
tools::darwin::getArchTypeForMachOArchName(A->getValue());
if (Arch == llvm::Triple::UnknownArch) {
Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
continue;
}
A->claim();
if (ArchNames.insert(A->getValue()).second)
Archs.push_back(A->getValue());
}
}
// When there is no explicit arch for this platform, make sure we still bind
// the architecture (to the default) so that -Xarch_ is handled correctly.
if (!Archs.size())
Archs.push_back(Args.MakeArgString(TC.getDefaultUniversalArchName()));
ActionList SingleActions;
BuildActions(C, Args, BAInputs, SingleActions);
// Add in arch bindings for every top level action, as well as lipo and
// dsymutil steps if needed.
for (Action* Act : SingleActions) {
// Make sure we can lipo this kind of output. If not (and it is an actual
// output) then we disallow, since we can't create an output file with the
// right name without overwriting it. We could remove this oddity by just
// changing the output names to include the arch, which would also fix
// -save-temps. Compatibility wins for now.
if (Archs.size() > 1 && !types::canLipoType(Act->getType()))
Diag(clang::diag::err_drv_invalid_output_with_multiple_archs)
<< types::getTypeName(Act->getType());
ActionList Inputs;
for (unsigned i = 0, e = Archs.size(); i != e; ++i)
Inputs.push_back(C.MakeAction<BindArchAction>(Act, Archs[i]));
// Lipo if necessary, we do it this way because we need to set the arch flag
// so that -Xarch_ gets overwritten.
if (Inputs.size() == 1 || Act->getType() == types::TY_Nothing)
Actions.append(Inputs.begin(), Inputs.end());
else
Actions.push_back(C.MakeAction<LipoJobAction>(Inputs, Act->getType()));
// Handle debug info queries.
Arg *A = Args.getLastArg(options::OPT_g_Group);
if (A && !A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_gstabs) &&
ContainsCompileOrAssembleAction(Actions.back())) {
// Add a 'dsymutil' step if necessary, when debug info is enabled and we
// have a compile input. We need to run 'dsymutil' ourselves in such cases
// because the debug info will refer to a temporary object file which
// will be removed at the end of the compilation process.
if (Act->getType() == types::TY_Image) {
ActionList Inputs;
Inputs.push_back(Actions.back());
Actions.pop_back();
Actions.push_back(
C.MakeAction<DsymutilJobAction>(Inputs, types::TY_dSYM));
}
// Verify the debug info output.
if (Args.hasArg(options::OPT_verify_debug_info)) {
Action* LastAction = Actions.back();
Actions.pop_back();
Actions.push_back(C.MakeAction<VerifyDebugInfoJobAction>(
LastAction, types::TY_Nothing));
}
}
}
}
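// Illustrative Darwin invocation (hypothetical): "clang -arch i386 -arch
// x86_64 foo.c" binds each top-level action to both architectures and joins
// them with a LipoJobAction; adding -g also appends a DsymutilJobAction for
// image outputs, since the debug info would otherwise point at temporary
// object files.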
/// Check that the file referenced by Value exists. If it doesn't,
/// issue a diagnostic and return false.
static bool DiagnoseInputExistence(const Driver &D, const DerivedArgList &Args,
StringRef Value, types::ID Ty) {
if (!D.getCheckInputsExist())
return true;
// stdin always exists.
if (Value == "-")
return true;
SmallString<64> Path(Value);
if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) {
if (!llvm::sys::path::is_absolute(Path)) {
SmallString<64> Directory(WorkDir->getValue());
llvm::sys::path::append(Directory, Value);
Path.assign(Directory);
}
}
if (llvm::sys::fs::exists(Twine(Path)))
return true;
if (D.IsCLMode()) {
if (!llvm::sys::path::is_absolute(Twine(Path)) &&
llvm::sys::Process::FindInEnvPath("LIB", Value))
return true;
if (Args.hasArg(options::OPT__SLASH_link) && Ty == types::TY_Object) {
// Arguments to the /link flag might cause the linker to search for object
// and library files in paths we don't know about. Don't error in such
// cases.
return true;
}
}
D.Diag(clang::diag::err_drv_no_such_file) << Path;
return false;
}
// Construct the list of inputs and their types.
void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
InputList &Inputs) const {
// Track the current user specified (-x) input. We also explicitly track the
// argument used to set the type; we only want to claim the type when we
// actually use it, so we warn about unused -x arguments.
types::ID InputType = types::TY_Nothing;
Arg *InputTypeArg = nullptr;
// The last /TC or /TP option sets the input type to C or C++ globally.
if (Arg *TCTP = Args.getLastArgNoClaim(options::OPT__SLASH_TC,
options::OPT__SLASH_TP)) {
InputTypeArg = TCTP;
InputType = TCTP->getOption().matches(options::OPT__SLASH_TC)
? types::TY_C
: types::TY_CXX;
Arg *Previous = nullptr;
bool ShowNote = false;
for (Arg *A : Args.filtered(options::OPT__SLASH_TC, options::OPT__SLASH_TP)) {
if (Previous) {
Diag(clang::diag::warn_drv_overriding_flag_option)
<< Previous->getSpelling() << A->getSpelling();
ShowNote = true;
}
Previous = A;
}
if (ShowNote)
Diag(clang::diag::note_drv_t_option_is_global);
// No driver mode exposes -x and /TC or /TP; we don't support mixing them.
assert(!Args.hasArg(options::OPT_x) && "-x and /TC or /TP is not allowed");
}
for (Arg *A : Args) {
if (A->getOption().getKind() == Option::InputClass) {
const char *Value = A->getValue();
types::ID Ty = types::TY_INVALID;
// Infer the input type if necessary.
if (InputType == types::TY_Nothing) {
// If there was an explicit arg for this, claim it.
if (InputTypeArg)
InputTypeArg->claim();
// stdin must be handled specially.
if (memcmp(Value, "-", 2) == 0) {
// If running with -E, treat as a C input (this changes the builtin
// macros, for example). This may be overridden by -ObjC below.
//
// Otherwise emit an error but still use a valid type to avoid
// spurious errors (e.g., no inputs).
if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP())
Diag(IsCLMode() ? clang::diag::err_drv_unknown_stdin_type_clang_cl
: clang::diag::err_drv_unknown_stdin_type);
Ty = types::TY_C;
} else {
// Otherwise lookup by extension.
// Fallback is C if invoked as C preprocessor or Object otherwise.
// We use a host hook here because Darwin at least has its own
// idea of what .s is.
if (const char *Ext = strrchr(Value, '.'))
Ty = TC.LookupTypeForExtension(Ext + 1);
if (Ty == types::TY_INVALID) {
if (CCCIsCPP())
Ty = types::TY_C;
else
Ty = types::TY_Object;
}
// If the driver is invoked as C++ compiler (like clang++ or c++) it
// should autodetect some input files as C++ for g++ compatibility.
if (CCCIsCXX()) {
types::ID OldTy = Ty;
Ty = types::lookupCXXTypeForCType(Ty);
if (Ty != OldTy)
Diag(clang::diag::warn_drv_treating_input_as_cxx)
<< getTypeName(OldTy) << getTypeName(Ty);
}
}
// -ObjC and -ObjC++ override the default language, but only for "source
// files". We just treat everything that isn't a linker input as a
// source file.
//
// FIXME: Clean this up if we move the phase sequence into the type.
if (Ty != types::TY_Object) {
if (Args.hasArg(options::OPT_ObjC))
Ty = types::TY_ObjC;
else if (Args.hasArg(options::OPT_ObjCXX))
Ty = types::TY_ObjCXX;
}
} else {
assert(InputTypeArg && "InputType set w/o InputTypeArg");
if (!InputTypeArg->getOption().matches(options::OPT_x)) {
// If emulating cl.exe, make sure that /TC and /TP don't affect input
// object files.
const char *Ext = strrchr(Value, '.');
if (Ext && TC.LookupTypeForExtension(Ext + 1) == types::TY_Object)
Ty = types::TY_Object;
}
if (Ty == types::TY_INVALID) {
Ty = InputType;
InputTypeArg->claim();
}
}
if (DiagnoseInputExistence(*this, Args, Value, Ty))
Inputs.push_back(std::make_pair(Ty, A));
} else if (A->getOption().matches(options::OPT__SLASH_Tc)) {
StringRef Value = A->getValue();
if (DiagnoseInputExistence(*this, Args, Value, types::TY_C)) {
Arg *InputArg = MakeInputArg(Args, *Opts, A->getValue());
Inputs.push_back(std::make_pair(types::TY_C, InputArg));
}
A->claim();
} else if (A->getOption().matches(options::OPT__SLASH_Tp)) {
StringRef Value = A->getValue();
if (DiagnoseInputExistence(*this, Args, Value, types::TY_CXX)) {
Arg *InputArg = MakeInputArg(Args, *Opts, A->getValue());
Inputs.push_back(std::make_pair(types::TY_CXX, InputArg));
}
A->claim();
} else if (A->getOption().hasFlag(options::LinkerInput)) {
// Just treat as object type, we could make a special type for this if
// necessary.
Inputs.push_back(std::make_pair(types::TY_Object, A));
} else if (A->getOption().matches(options::OPT_x)) {
InputTypeArg = A;
InputType = types::lookupTypeForTypeSpecifier(A->getValue());
A->claim();
// Follow gcc behavior and treat as linker input for invalid -x
// options. It's not clear why we shouldn't just revert to unknown, but
// this isn't very important, we might as well be bug compatible.
if (!InputType) {
Diag(clang::diag::err_drv_unknown_language) << A->getValue();
InputType = types::TY_Object;
}
} else if (A->getOption().getID() == options::OPT__SLASH_U) {
assert(A->getNumValues() == 1 && "The /U option has one value.");
StringRef Val = A->getValue(0);
if (Val.find_first_of("/\\") != StringRef::npos) {
// Warn about e.g. "/Users/me/myfile.c".
Diag(diag::warn_slash_u_filename) << Val;
Diag(diag::note_use_dashdash);
}
}
}
if (CCCIsCPP() && Inputs.empty()) {
// If called as standalone preprocessor, stdin is processed
// if no other input is present.
Arg *A = MakeInputArg(Args, *Opts, "-");
Inputs.push_back(std::make_pair(types::TY_C, A));
}
}
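// Illustrative classification (hypothetical files): "clang foo.c bar.o"
// types foo.c as C by extension and bar.o as an object; "clang++ foo.c"
// remaps C to C++ and warns via warn_drv_treating_input_as_cxx; "clang -E -"
// accepts stdin as C without diagnosing an unknown stdin type.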
namespace {
/// Provides a convenient interface for different programming models to generate
/// the required device actions.
class OffloadingActionBuilder final {
/// Flag used to trace errors in the builder.
bool IsValid = false;
/// The compilation that is using this builder.
Compilation &C;
/// Map between an input argument and the offload kinds used to process it.
std::map<const Arg *, unsigned> InputArgToOffloadKindMap;
/// Builder interface. It doesn't build anything or keep any state.
class DeviceActionBuilder {
public:
typedef llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PhasesTy;
enum ActionBuilderReturnCode {
// The builder acted successfully on the current action.
ABRT_Success,
// The builder didn't have to act on the current action.
ABRT_Inactive,
// The builder was successful and requested the host action to not be
// generated.
ABRT_Ignore_Host,
};
protected:
/// Compilation associated with this builder.
Compilation &C;
/// Tool chains associated with this builder. The same programming
/// model may have associated one or more tool chains.
SmallVector<const ToolChain *, 2> ToolChains;
/// The derived arguments associated with this builder.
DerivedArgList &Args;
/// The inputs associated with this builder.
const Driver::InputList &Inputs;
/// The associated offload kind.
Action::OffloadKind AssociatedOffloadKind = Action::OFK_None;
public:
DeviceActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs,
Action::OffloadKind AssociatedOffloadKind)
: C(C), Args(Args), Inputs(Inputs),
AssociatedOffloadKind(AssociatedOffloadKind) {}
virtual ~DeviceActionBuilder() {}
/// Fill up the array \a DA with all the device dependences that should be
/// added to the provided host action \a HostAction. By default it is
/// inactive.
virtual ActionBuilderReturnCode
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) {
return ABRT_Inactive;
}
/// Update the state to include the provided host action \a HostAction as a
/// dependency of the current device action. By default it is inactive.
virtual ActionBuilderReturnCode addDeviceDepences(Action *HostAction) {
return ABRT_Inactive;
}
/// Append top level actions generated by the builder.
virtual void appendTopLevelActions(ActionList &AL) {}
/// Append linker actions generated by the builder.
virtual void appendLinkDependences(OffloadAction::DeviceDependences &DA) {}
/// Initialize the builder. Return true if any initialization errors are
/// found.
virtual bool initialize() { return false; }
/// Return true if the builder can use bundling/unbundling.
virtual bool canUseBundlerUnbundler() const { return false; }
/// Return true if this builder is valid. We have a valid builder if we have
/// associated device tool chains.
bool isValid() { return !ToolChains.empty(); }
/// Return the associated offload kind.
Action::OffloadKind getAssociatedOffloadKind() {
return AssociatedOffloadKind;
}
};
/// Base class for CUDA/HIP action builder. It injects device code in
/// the host backend action.
class CudaActionBuilderBase : public DeviceActionBuilder {
protected:
/// Flags to signal if the user requested host-only or device-only
/// compilation.
bool CompileHostOnly = false;
bool CompileDeviceOnly = false;
/// List of GPU architectures to use in this compilation.
SmallVector<CudaArch, 4> GpuArchList;
/// The CUDA actions for the current input.
ActionList CudaDeviceActions;
/// The CUDA fat binary if it was generated for the current input.
Action *CudaFatBinary = nullptr;
/// Flag that is set to true if this builder acted on the current input.
bool IsActive = false;
public:
CudaActionBuilderBase(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs,
Action::OffloadKind OFKind)
: DeviceActionBuilder(C, Args, Inputs, OFKind) {}
ActionBuilderReturnCode addDeviceDepences(Action *HostAction) override {
// While generating code for CUDA, we only depend on the host input action
// to trigger the creation of all the CUDA device actions.
// If we are dealing with an input action, replicate it for each GPU
// architecture. If we are in host-only mode we return 'success' so that
// the host uses the CUDA offload kind.
if (auto *IA = dyn_cast<InputAction>(HostAction)) {
assert(!GpuArchList.empty() &&
"We should have at least one GPU architecture.");
// If the host input is not CUDA or HIP, we don't need to bother about
// this input.
if (IA->getType() != types::TY_CUDA &&
IA->getType() != types::TY_HIP) {
// The builder will ignore this input.
IsActive = false;
return ABRT_Inactive;
}
// Set the flag to true, so that the builder acts on the current input.
IsActive = true;
if (CompileHostOnly)
return ABRT_Success;
// Replicate inputs for each GPU architecture.
auto Ty = IA->getType() == types::TY_HIP ? types::TY_HIP_DEVICE
: types::TY_CUDA_DEVICE;
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
CudaDeviceActions.push_back(
C.MakeAction<InputAction>(IA->getInputArg(), Ty));
}
return ABRT_Success;
}
// If this is an unbundling action use it as is for each CUDA toolchain.
if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
CudaDeviceActions.clear();
for (auto Arch : GpuArchList) {
CudaDeviceActions.push_back(UA);
UA->registerDependentActionInfo(ToolChains[0], CudaArchToString(Arch),
AssociatedOffloadKind);
}
return ABRT_Success;
}
return IsActive ? ABRT_Success : ABRT_Inactive;
}
void appendTopLevelActions(ActionList &AL) override {
// Utility to append actions to the top level list.
auto AddTopLevel = [&](Action *A, CudaArch BoundArch) {
OffloadAction::DeviceDependences Dep;
Dep.add(*A, *ToolChains.front(), CudaArchToString(BoundArch),
AssociatedOffloadKind);
AL.push_back(C.MakeAction<OffloadAction>(Dep, A->getType()));
};
// If we have a fat binary, add it to the list.
if (CudaFatBinary) {
AddTopLevel(CudaFatBinary, CudaArch::UNKNOWN);
CudaDeviceActions.clear();
CudaFatBinary = nullptr;
return;
}
if (CudaDeviceActions.empty())
return;
// If we have CUDA actions at this point, that's because we have a
// partial compilation, so we should have an action for each GPU
// architecture.
assert(CudaDeviceActions.size() == GpuArchList.size() &&
"Expecting one action per GPU architecture.");
assert(ToolChains.size() == 1 &&
"Expecting to have a sing CUDA toolchain.");
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
AddTopLevel(CudaDeviceActions[I], GpuArchList[I]);
CudaDeviceActions.clear();
}
bool initialize() override {
assert(AssociatedOffloadKind == Action::OFK_Cuda ||
AssociatedOffloadKind == Action::OFK_HIP);
// We don't need to support CUDA.
if (AssociatedOffloadKind == Action::OFK_Cuda &&
!C.hasOffloadToolChain<Action::OFK_Cuda>())
return false;
// We don't need to support HIP.
if (AssociatedOffloadKind == Action::OFK_HIP &&
!C.hasOffloadToolChain<Action::OFK_HIP>())
return false;
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
assert(HostTC && "No toolchain for host compilation.");
if (HostTC->getTriple().isNVPTX() ||
HostTC->getTriple().getArch() == llvm::Triple::amdgcn) {
// We do not support targeting NVPTX/AMDGCN for host compilation. Throw
// an error and abort pipeline construction early so we don't trip
// asserts that assume device-side compilation.
C.getDriver().Diag(diag::err_drv_cuda_host_arch)
<< HostTC->getTriple().getArchName();
return true;
}
ToolChains.push_back(
AssociatedOffloadKind == Action::OFK_Cuda
? C.getSingleOffloadToolChain<Action::OFK_Cuda>()
: C.getSingleOffloadToolChain<Action::OFK_HIP>());
Arg *PartialCompilationArg = Args.getLastArg(
options::OPT_cuda_host_only, options::OPT_cuda_device_only,
options::OPT_cuda_compile_host_device);
CompileHostOnly = PartialCompilationArg &&
PartialCompilationArg->getOption().matches(
options::OPT_cuda_host_only);
CompileDeviceOnly = PartialCompilationArg &&
PartialCompilationArg->getOption().matches(
options::OPT_cuda_device_only);
// Collect all cuda_gpu_arch parameters, removing duplicates.
std::set<CudaArch> GpuArchs;
bool Error = false;
for (Arg *A : Args) {
if (!(A->getOption().matches(options::OPT_cuda_gpu_arch_EQ) ||
A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ)))
continue;
A->claim();
const StringRef ArchStr = A->getValue();
if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ) &&
ArchStr == "all") {
GpuArchs.clear();
continue;
}
CudaArch Arch = StringToCudaArch(ArchStr);
if (Arch == CudaArch::UNKNOWN) {
C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
Error = true;
} else if (A->getOption().matches(options::OPT_cuda_gpu_arch_EQ))
GpuArchs.insert(Arch);
else if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ))
GpuArchs.erase(Arch);
else
llvm_unreachable("Unexpected option.");
}
// Collect list of GPUs remaining in the set.
for (CudaArch Arch : GpuArchs)
GpuArchList.push_back(Arch);
// Default to sm_20 which is the lowest common denominator for
// supported GPUs. sm_20 code should work correctly, if
// suboptimally, on all newer GPUs.
if (GpuArchList.empty())
GpuArchList.push_back(CudaArch::SM_20);
return Error;
}
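// Illustrative arch selection (hypothetical flags): "--cuda-gpu-arch=sm_35
// --cuda-gpu-arch=sm_60 --no-cuda-gpu-arch=sm_35" leaves {sm_60};
// "--no-cuda-gpu-arch=all" clears everything accumulated so far, and an
// empty final set falls back to the sm_20 default.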
};
/// \brief CUDA action builder. It injects device code in the host backend
/// action.
class CudaActionBuilder final : public CudaActionBuilderBase {
public:
CudaActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_Cuda) {}
ActionBuilderReturnCode
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
if (!IsActive)
return ABRT_Inactive;
// If we don't have more CUDA actions, we don't have any dependences to
// create for the host.
if (CudaDeviceActions.empty())
return ABRT_Success;
assert(CudaDeviceActions.size() == GpuArchList.size() &&
"Expecting one action per GPU architecture.");
assert(!CompileHostOnly &&
"Not expecting CUDA actions in host-only compilation.");
// If we are generating code for the device or we are in a backend phase,
// we attempt to generate the fat binary. We compile each arch to ptx and
// assemble to cubin, then feed the cubin *and* the ptx into a device
// "link" action, which uses fatbinary to combine these cubins into one
// fatbin. The fatbin is then an input to the host action if not in
// device-only mode.
if (CompileDeviceOnly || CurPhase == phases::Backend) {
ActionList DeviceActions;
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
// Produce the device action from the current phase up to the assemble
// phase.
for (auto Ph : Phases) {
// Skip the phases that were already dealt with.
if (Ph < CurPhase)
continue;
// We have to be consistent with the host final phase.
if (Ph > FinalPhase)
break;
CudaDeviceActions[I] = C.getDriver().ConstructPhaseAction(
C, Args, Ph, CudaDeviceActions[I], Action::OFK_Cuda);
if (Ph == phases::Assemble)
break;
}
// If we didn't reach the assemble phase, we can't generate the fat
// binary. We don't need to generate the fat binary if we are not in
// device-only mode.
if (!isa<AssembleJobAction>(CudaDeviceActions[I]) ||
CompileDeviceOnly)
continue;
Action *AssembleAction = CudaDeviceActions[I];
assert(AssembleAction->getType() == types::TY_Object);
assert(AssembleAction->getInputs().size() == 1);
Action *BackendAction = AssembleAction->getInputs()[0];
assert(BackendAction->getType() == types::TY_PP_Asm);
for (auto &A : {AssembleAction, BackendAction}) {
OffloadAction::DeviceDependences DDep;
DDep.add(*A, *ToolChains.front(), CudaArchToString(GpuArchList[I]),
Action::OFK_Cuda);
DeviceActions.push_back(
C.MakeAction<OffloadAction>(DDep, A->getType()));
}
}
// We generate the fat binary if we have device input actions.
if (!DeviceActions.empty()) {
CudaFatBinary =
C.MakeAction<LinkJobAction>(DeviceActions, types::TY_CUDA_FATBIN);
if (!CompileDeviceOnly) {
DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
Action::OFK_Cuda);
// Clear the fat binary, it is already a dependence of a host
// action.
CudaFatBinary = nullptr;
}
// Remove the CUDA actions as they are already connected to a host
// action or fat binary.
CudaDeviceActions.clear();
}
// We avoid creating host action in device-only mode.
return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
} else if (CurPhase > phases::Backend) {
// If we are past the backend phase and still have a device action, we
// don't have to do anything as this action is already a device
// top-level action.
return ABRT_Success;
}
assert(CurPhase < phases::Backend && "Generating single CUDA "
"instructions should only occur "
"before the backend phase!");
// By default, we produce an action for each device arch.
for (Action *&A : CudaDeviceActions)
A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A);
return ABRT_Success;
}
};
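// Illustrative CUDA pipeline for one input with arches sm_20 and sm_35
// (hypothetical): each arch is compiled to PTX and assembled to a cubin;
// both the cubins and the PTX feed a device LinkJobAction that produces a
// TY_CUDA_FATBIN, which becomes a dependence of the host action unless
// --cuda-device-only was requested.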
/// \brief HIP action builder. It injects device code in the host backend
/// action.
class HIPActionBuilder final : public CudaActionBuilderBase {
/// The linker inputs obtained for each device arch.
SmallVector<ActionList, 8> DeviceLinkerInputs;
public:
HIPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {}
bool canUseBundlerUnbundler() const override { return true; }
ActionBuilderReturnCode
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
// amdgcn does not support linking of object files, therefore we skip
// backend and assemble phases to output LLVM IR.
if (CudaDeviceActions.empty() || CurPhase == phases::Backend ||
CurPhase == phases::Assemble)
return ABRT_Success;
assert((CurPhase == phases::Link ||
CudaDeviceActions.size() == GpuArchList.size()) &&
"Expecting one action per GPU architecture.");
assert(!CompileHostOnly &&
"Not expecting CUDA actions in host-only compilation.");
// Save CudaDeviceActions to DeviceLinkerInputs for each GPU subarch.
// This happens to each device action originated from each input file.
// Later on, device actions in DeviceLinkerInputs are used to create
// device link actions in appendLinkDependences and the created device
// link actions are passed to the offload action as device dependence.
if (CurPhase == phases::Link) {
DeviceLinkerInputs.resize(CudaDeviceActions.size());
auto LI = DeviceLinkerInputs.begin();
for (auto *A : CudaDeviceActions) {
LI->push_back(A);
++LI;
}
// We will pass the device actions as host dependences, so we don't
// need to do anything else with them.
CudaDeviceActions.clear();
return ABRT_Success;
}
// By default, we produce an action for each device arch.
for (Action *&A : CudaDeviceActions)
A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A,
AssociatedOffloadKind);
return ABRT_Success;
}
void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {
// Append a new link action for each device.
unsigned I = 0;
for (auto &LI : DeviceLinkerInputs) {
auto *DeviceLinkAction =
C.MakeAction<LinkJobAction>(LI, types::TY_Image);
DA.add(*DeviceLinkAction, *ToolChains[0],
CudaArchToString(GpuArchList[I]), AssociatedOffloadKind);
++I;
}
}
};
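// Illustrative HIP flow: device actions stay as LLVM IR (the backend and
// assemble phases are skipped above), are gathered per GPU arch at the link
// phase, and appendLinkDependences then emits one device LinkJobAction per
// arch (e.g. two archs yield two TY_Image device links).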
/// OpenMP action builder. The host bitcode is passed to the device frontend
/// and all the device linked images are passed to the host link phase.
class OpenMPActionBuilder final : public DeviceActionBuilder {
/// The OpenMP actions for the current input.
ActionList OpenMPDeviceActions;
/// The linker inputs obtained for each toolchain.
SmallVector<ActionList, 8> DeviceLinkerInputs;
public:
OpenMPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: DeviceActionBuilder(C, Args, Inputs, Action::OFK_OpenMP) {}
ActionBuilderReturnCode
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
// We should always have an action for each input.
assert(OpenMPDeviceActions.size() == ToolChains.size() &&
"Number of OpenMP actions and toolchains do not match.");
// The host only depends on device action in the linking phase, when all
// the device images have to be embedded in the host image.
if (CurPhase == phases::Link) {
assert(ToolChains.size() == DeviceLinkerInputs.size() &&
"Toolchains and linker inputs sizes do not match.");
auto LI = DeviceLinkerInputs.begin();
for (auto *A : OpenMPDeviceActions) {
LI->push_back(A);
++LI;
}
// We passed the device actions as host dependences, so we don't need
// to do anything else with them.
OpenMPDeviceActions.clear();
return ABRT_Success;
}
// By default, we produce an action for each device arch.
for (Action *&A : OpenMPDeviceActions)
A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A);
return ABRT_Success;
}
ActionBuilderReturnCode addDeviceDepences(Action *HostAction) override {
// If this is an input action, replicate it for each OpenMP toolchain.
if (auto *IA = dyn_cast<InputAction>(HostAction)) {
OpenMPDeviceActions.clear();
for (unsigned I = 0; I < ToolChains.size(); ++I)
OpenMPDeviceActions.push_back(
C.MakeAction<InputAction>(IA->getInputArg(), IA->getType()));
return ABRT_Success;
}
// If this is an unbundling action, use it as is for each OpenMP toolchain.
if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
OpenMPDeviceActions.clear();
for (unsigned I = 0; I < ToolChains.size(); ++I) {
OpenMPDeviceActions.push_back(UA);
UA->registerDependentActionInfo(
ToolChains[I], /*BoundArch=*/StringRef(), Action::OFK_OpenMP);
}
return ABRT_Success;
}
// When generating code for OpenMP we use the host compile phase result as
// a dependence to the device compile phase so that it can learn what
// declarations should be emitted. However, this is not the only use for
// the host action, so we prevent it from being collapsed.
if (isa<CompileJobAction>(HostAction)) {
HostAction->setCannotBeCollapsedWithNextDependentAction();
assert(ToolChains.size() == OpenMPDeviceActions.size() &&
"Toolchains and device action sizes do not match.");
OffloadAction::HostDependence HDep(
*HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BoundArch=*/nullptr, Action::OFK_OpenMP);
auto TC = ToolChains.begin();
for (Action *&A : OpenMPDeviceActions) {
assert(isa<CompileJobAction>(A));
OffloadAction::DeviceDependences DDep;
DDep.add(*A, **TC, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
A = C.MakeAction<OffloadAction>(HDep, DDep);
++TC;
}
}
return ABRT_Success;
}
void appendTopLevelActions(ActionList &AL) override {
if (OpenMPDeviceActions.empty())
return;
// We should always have an action for each input.
assert(OpenMPDeviceActions.size() == ToolChains.size() &&
"Number of OpenMP actions and toolchains do not match.");
// Append all device actions followed by the proper offload action.
auto TI = ToolChains.begin();
for (auto *A : OpenMPDeviceActions) {
OffloadAction::DeviceDependences Dep;
Dep.add(*A, **TI, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
AL.push_back(C.MakeAction<OffloadAction>(Dep, A->getType()));
++TI;
}
// We no longer need the action stored in this builder.
OpenMPDeviceActions.clear();
}
void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {
assert(ToolChains.size() == DeviceLinkerInputs.size() &&
"Toolchains and linker inputs sizes do not match.");
// Append a new link action for each device.
auto TC = ToolChains.begin();
for (auto &LI : DeviceLinkerInputs) {
auto *DeviceLinkAction =
C.MakeAction<LinkJobAction>(LI, types::TY_Image);
DA.add(*DeviceLinkAction, **TC, /*BoundArch=*/nullptr,
Action::OFK_OpenMP);
++TC;
}
}
bool initialize() override {
// Get the OpenMP toolchains. If we don't get any, the action builder will
// know there is nothing to do related to OpenMP offloading.
auto OpenMPTCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
for (auto TI = OpenMPTCRange.first, TE = OpenMPTCRange.second; TI != TE;
++TI)
ToolChains.push_back(TI->second);
DeviceLinkerInputs.resize(ToolChains.size());
return false;
}
bool canUseBundlerUnbundler() const override {
// OpenMP should use bundled files whenever possible.
return true;
}
};
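// Illustrative OpenMP flow: for `clang -fopenmp
// -fopenmp-targets=nvptx64-nvidia-cuda foo.c`, one device action is created
// per offload toolchain, the host compile result is attached to each device
// compile as a host dependence, and the device images are linked into the
// host image at the link phase.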
///
/// TODO: Add the implementation for other specialized builders here.
///
/// Specialized builders being used by this offloading action builder.
SmallVector<DeviceActionBuilder *, 4> SpecializedBuilders;
/// Flag set to true if all valid builders allow file bundling/unbundling.
bool CanUseBundler;
public:
OffloadingActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: C(C) {
// Create a specialized builder for each device toolchain.
IsValid = true;
// Create a specialized builder for CUDA.
SpecializedBuilders.push_back(new CudaActionBuilder(C, Args, Inputs));
// Create a specialized builder for HIP.
SpecializedBuilders.push_back(new HIPActionBuilder(C, Args, Inputs));
// Create a specialized builder for OpenMP.
SpecializedBuilders.push_back(new OpenMPActionBuilder(C, Args, Inputs));
//
// TODO: Build other specialized builders here.
//
// Initialize all the builders, keeping track of errors. If all valid
// builders agree that we can use bundling, set the flag to true.
unsigned ValidBuilders = 0u;
unsigned ValidBuildersSupportingBundling = 0u;
for (auto *SB : SpecializedBuilders) {
IsValid = IsValid && !SB->initialize();
// Update the counters if the builder is valid.
if (SB->isValid()) {
++ValidBuilders;
if (SB->canUseBundlerUnbundler())
++ValidBuildersSupportingBundling;
}
}
CanUseBundler =
ValidBuilders && ValidBuilders == ValidBuildersSupportingBundling;
}
~OffloadingActionBuilder() {
for (auto *SB : SpecializedBuilders)
delete SB;
}
/// Generate an action that adds device dependences (if any) to a host action.
/// If no device dependence actions exist, just return the host action \a
/// HostAction. If an error is found or if no builder requires the host action
/// to be generated, return nullptr.
Action *
addDeviceDependencesToHostAction(Action *HostAction, const Arg *InputArg,
phases::ID CurPhase, phases::ID FinalPhase,
DeviceActionBuilder::PhasesTy &Phases) {
if (!IsValid)
return nullptr;
if (SpecializedBuilders.empty())
return HostAction;
assert(HostAction && "Invalid host action!");
OffloadAction::DeviceDependences DDeps;
// Check if all the programming models agree we should not emit the host
// action. Also, keep track of the offloading kinds employed.
auto &OffloadKind = InputArgToOffloadKindMap[InputArg];
unsigned InactiveBuilders = 0u;
unsigned IgnoringBuilders = 0u;
for (auto *SB : SpecializedBuilders) {
if (!SB->isValid()) {
++InactiveBuilders;
continue;
}
auto RetCode =
SB->getDeviceDependences(DDeps, CurPhase, FinalPhase, Phases);
// If the builder explicitly says the host action should be ignored,
// increment the count of builders requesting that.
if (RetCode == DeviceActionBuilder::ABRT_Ignore_Host)
++IgnoringBuilders;
// Unless the builder was inactive for this action, we have to record the
// offload kind because the host will have to use it.
if (RetCode != DeviceActionBuilder::ABRT_Inactive)
OffloadKind |= SB->getAssociatedOffloadKind();
}
// If all builders agree that the host object should be ignored, just return
// nullptr.
if (IgnoringBuilders &&
SpecializedBuilders.size() == (InactiveBuilders + IgnoringBuilders))
return nullptr;
if (DDeps.getActions().empty())
return HostAction;
// We have dependences we need to bundle together. We use an offload action
// for that.
OffloadAction::HostDependence HDep(
*HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BoundArch=*/nullptr, DDeps);
return C.MakeAction<OffloadAction>(HDep, DDeps);
}
/// Generate an action that adds a host dependence to a device action. The
/// results will be kept in this action builder. Return true if an error was
/// found.
bool addHostDependenceToDeviceActions(Action *&HostAction,
const Arg *InputArg) {
if (!IsValid)
return true;
// If we support bundling/unbundling and the current action is an input
// action for a non-source file, we replace the host action with an
// unbundling action. The bundler tool can detect whether an input is a
// bundle; if it is not, the tool assumes it is a host file. It is
// therefore safe to create an unbundling action even if the input is not
// a bundle.
if (CanUseBundler && isa<InputAction>(HostAction) &&
InputArg->getOption().getKind() == llvm::opt::Option::InputClass &&
!types::isSrcFile(HostAction->getType())) {
auto UnbundlingHostAction =
C.MakeAction<OffloadUnbundlingJobAction>(HostAction);
UnbundlingHostAction->registerDependentActionInfo(
C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BoundArch=*/StringRef(), Action::OFK_Host);
HostAction = UnbundlingHostAction;
}
assert(HostAction && "Invalid host action!");
// Register the offload kinds that are used.
auto &OffloadKind = InputArgToOffloadKindMap[InputArg];
for (auto *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
auto RetCode = SB->addDeviceDepences(HostAction);
// Host dependences for device actions are not compatible with that same
// action being ignored.
assert(RetCode != DeviceActionBuilder::ABRT_Ignore_Host &&
"Host dependence not expected to be ignored.!");
// Unless the builder was inactive for this action, we have to record the
// offload kind because the host will have to use it.
if (RetCode != DeviceActionBuilder::ABRT_Inactive)
OffloadKind |= SB->getAssociatedOffloadKind();
}
return false;
}
/// Add the offloading top level actions to the provided action list. This
/// function can replace the host action with a bundling action if the
/// programming models allow it.
bool appendTopLevelActions(ActionList &AL, Action *HostAction,
const Arg *InputArg) {
// Get the device actions to be appended.
ActionList OffloadAL;
for (auto *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
SB->appendTopLevelActions(OffloadAL);
}
// If we can use the bundler, replace the host action with the bundling one
// in the resulting list. Otherwise, just append the device actions.
if (CanUseBundler && !OffloadAL.empty()) {
// Add the host action to the list in order to create the bundling action.
OffloadAL.push_back(HostAction);
// We expect that the host action was just appended to the action list
// before this method was called.
assert(HostAction == AL.back() && "Host action not in the list??");
HostAction = C.MakeAction<OffloadBundlingJobAction>(OffloadAL);
AL.back() = HostAction;
} else
AL.append(OffloadAL.begin(), OffloadAL.end());
// Propagate to the current host action (if any) the offload information
// associated with the current input.
if (HostAction)
HostAction->propagateHostOffloadInfo(InputArgToOffloadKindMap[InputArg],
/*BoundArch=*/nullptr);
return false;
}
/// Processes the host linker action. This currently consists of replacing it
with an offload action if there are device link objects and propagating to
/// the host action all the offload kinds used in the current compilation. The
/// resulting action is returned.
Action *processHostLinkAction(Action *HostAction) {
// Add all the dependences from the device linking actions.
OffloadAction::DeviceDependences DDeps;
for (auto *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
SB->appendLinkDependences(DDeps);
}
// Calculate all the offload kinds used in the current compilation.
unsigned ActiveOffloadKinds = 0u;
for (auto &I : InputArgToOffloadKindMap)
ActiveOffloadKinds |= I.second;
// If we don't have device dependencies, we don't have to create an offload
// action.
if (DDeps.getActions().empty()) {
// Propagate all the active kinds to the host action. Given that it is a
// link action, it is assumed to depend on all actions generated so far.
HostAction->propagateHostOffloadInfo(ActiveOffloadKinds,
/*BoundArch=*/nullptr);
return HostAction;
}
// Create the offload action with all dependences. When an offload action
// is created the kinds are propagated to the host action, so we don't have
// to do that explicitly here.
OffloadAction::HostDependence HDep(
*HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BoundArch*/ nullptr, ActiveOffloadKinds);
return C.MakeAction<OffloadAction>(HDep, DDeps);
}
};
} // anonymous namespace.
void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
const InputList &Inputs, ActionList &Actions) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation actions");
if (!SuppressMissingInputWarning && Inputs.empty()) {
Diag(clang::diag::err_drv_no_input_files);
return;
}
Arg *FinalPhaseArg;
phases::ID FinalPhase = getFinalPhase(Args, &FinalPhaseArg);
if (FinalPhase == phases::Link) {
if (Args.hasArg(options::OPT_emit_llvm))
Diag(clang::diag::err_drv_emit_llvm_link);
if (IsCLMode() && LTOMode != LTOK_None &&
!Args.getLastArgValue(options::OPT_fuse_ld_EQ).equals_lower("lld"))
Diag(clang::diag::err_drv_lto_without_lld);
}
// Reject -Z* at the top level, these options should never have been exposed
// by gcc.
if (Arg *A = Args.getLastArg(options::OPT_Z_Joined))
Diag(clang::diag::err_drv_use_of_Z_option) << A->getAsString(Args);
// Diagnose misuse of /Fo.
if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fo)) {
StringRef V = A->getValue();
if (Inputs.size() > 1 && !V.empty() &&
!llvm::sys::path::is_separator(V.back())) {
// Check whether /Fo tries to name an output file for multiple inputs.
Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
<< A->getSpelling() << V;
Args.eraseArg(options::OPT__SLASH_Fo);
}
}
// Diagnose misuse of /Fa.
if (Arg *A = Args.getLastArg(options::OPT__SLASH_Fa)) {
StringRef V = A->getValue();
if (Inputs.size() > 1 && !V.empty() &&
!llvm::sys::path::is_separator(V.back())) {
// Check whether /Fa tries to name an asm file for multiple inputs.
Diag(clang::diag::err_drv_out_file_argument_with_multiple_sources)
<< A->getSpelling() << V;
Args.eraseArg(options::OPT__SLASH_Fa);
}
}
// Diagnose misuse of /o.
if (Arg *A = Args.getLastArg(options::OPT__SLASH_o)) {
if (A->getValue()[0] == '\0') {
// It has to have a value.
Diag(clang::diag::err_drv_missing_argument) << A->getSpelling() << 1;
Args.eraseArg(options::OPT__SLASH_o);
}
}
// Diagnose unsupported forms of /Yc /Yu. Ignore /Yc/Yu for now if:
// * no filename after it
// * both /Yc and /Yu passed but with different filenames
// * corresponding file not also passed as /FI
Arg *YcArg = Args.getLastArg(options::OPT__SLASH_Yc);
Arg *YuArg = Args.getLastArg(options::OPT__SLASH_Yu);
if (YcArg && YcArg->getValue()[0] == '\0') {
Diag(clang::diag::warn_drv_ycyu_no_arg_clang_cl) << YcArg->getSpelling();
Args.eraseArg(options::OPT__SLASH_Yc);
YcArg = nullptr;
}
if (YuArg && YuArg->getValue()[0] == '\0') {
Diag(clang::diag::warn_drv_ycyu_no_arg_clang_cl) << YuArg->getSpelling();
Args.eraseArg(options::OPT__SLASH_Yu);
YuArg = nullptr;
}
if (YcArg && YuArg && strcmp(YcArg->getValue(), YuArg->getValue()) != 0) {
Diag(clang::diag::warn_drv_ycyu_different_arg_clang_cl);
Args.eraseArg(options::OPT__SLASH_Yc);
Args.eraseArg(options::OPT__SLASH_Yu);
YcArg = YuArg = nullptr;
}
if (YcArg && Inputs.size() > 1) {
Diag(clang::diag::warn_drv_yc_multiple_inputs_clang_cl);
Args.eraseArg(options::OPT__SLASH_Yc);
YcArg = nullptr;
}
- if (Args.hasArg(options::OPT__SLASH_Y_)) {
- // /Y- disables all pch handling. Rather than check for it everywhere,
- // just remove clang-cl pch-related flags here.
+ if (FinalPhase == phases::Preprocess || Args.hasArg(options::OPT__SLASH_Y_)) {
+ // If only preprocessing or /Y- is used, all pch handling is disabled.
+ // Rather than check for it everywhere, just remove clang-cl pch-related
+ // flags here.
Args.eraseArg(options::OPT__SLASH_Fp);
Args.eraseArg(options::OPT__SLASH_Yc);
Args.eraseArg(options::OPT__SLASH_Yu);
YcArg = YuArg = nullptr;
}
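// For illustration (hypothetical file names): `clang-cl /Ycpch.h /FIpch.h
// foo.cc` first builds pch.h into a PCH and then compiles foo.cc using it,
// while /Y- or a preprocess-only invocation drops all PCH handling.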
// Builder to be used to build offloading actions.
OffloadingActionBuilder OffloadBuilder(C, Args, Inputs);
// Construct the actions to perform.
ActionList LinkerInputs;
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PL;
for (auto &I : Inputs) {
types::ID InputType = I.first;
const Arg *InputArg = I.second;
PL.clear();
types::getCompilationPhases(InputType, PL);
// If the first step comes after the final phase we are doing as part of
// this compilation, warn the user about it.
phases::ID InitialPhase = PL[0];
if (InitialPhase > FinalPhase) {
if (InputArg->isClaimed())
continue;
// Claim here to avoid the more general unused warning.
InputArg->claim();
// Suppress all unused style warnings with -Qunused-arguments
if (Args.hasArg(options::OPT_Qunused_arguments))
continue;
// Special case when the final phase is determined by the binary name,
// rather than by a command-line argument with a corresponding Arg.
if (CCCIsCPP())
Diag(clang::diag::warn_drv_input_file_unused_by_cpp)
<< InputArg->getAsString(Args) << getPhaseName(InitialPhase);
// Special case '-E' warning on a previously preprocessed file to make
// more sense.
else if (InitialPhase == phases::Compile &&
FinalPhase == phases::Preprocess &&
getPreprocessedType(InputType) == types::TY_INVALID)
Diag(clang::diag::warn_drv_preprocessed_input_file_unused)
<< InputArg->getAsString(Args) << !!FinalPhaseArg
<< (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
else
Diag(clang::diag::warn_drv_input_file_unused)
<< InputArg->getAsString(Args) << getPhaseName(InitialPhase)
<< !!FinalPhaseArg
<< (FinalPhaseArg ? FinalPhaseArg->getOption().getName() : "");
continue;
}
if (YcArg) {
// Add a separate precompile phase for the compile phase.
if (FinalPhase >= phases::Compile) {
const types::ID HeaderType = lookupHeaderTypeForSourceType(InputType);
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PCHPL;
types::getCompilationPhases(HeaderType, PCHPL);
// Build the pipeline for the pch file.
Action *ClangClPch =
C.MakeAction<InputAction>(*InputArg, HeaderType);
for (phases::ID Phase : PCHPL)
ClangClPch = ConstructPhaseAction(C, Args, Phase, ClangClPch);
assert(ClangClPch);
Actions.push_back(ClangClPch);
// The driver currently exits after the first failed command. This
// relies on that behavior to make sure that if the pch generation
// fails, the main compilation won't run.
// FIXME: If the main compilation fails, the PCH generation should
// probably not be considered successful either.
}
}
// Build the pipeline for this file.
Action *Current = C.MakeAction<InputAction>(*InputArg, InputType);
// Use the current host action in any of the offloading actions, if
// required.
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
for (SmallVectorImpl<phases::ID>::iterator i = PL.begin(), e = PL.end();
i != e; ++i) {
phases::ID Phase = *i;
// We are done if this step is past what the user requested.
if (Phase > FinalPhase)
break;
// Add any offload action the host action depends on.
Current = OffloadBuilder.addDeviceDependencesToHostAction(
Current, InputArg, Phase, FinalPhase, PL);
if (!Current)
break;
// Queue linker inputs.
if (Phase == phases::Link) {
assert((i + 1) == e && "linking must be final compilation step.");
LinkerInputs.push_back(Current);
Current = nullptr;
break;
}
// Otherwise construct the appropriate action.
auto *NewCurrent = ConstructPhaseAction(C, Args, Phase, Current);
// We didn't create a new action, so we will just move to the next phase.
if (NewCurrent == Current)
continue;
Current = NewCurrent;
// Use the current host action in any of the offloading actions, if
// required.
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
if (Current->getType() == types::TY_Nothing)
break;
}
// If we ended with something, add to the output list.
if (Current)
Actions.push_back(Current);
// Add any top level actions generated for offloading.
OffloadBuilder.appendTopLevelActions(Actions, Current, InputArg);
}
// Add a link action if necessary.
if (!LinkerInputs.empty()) {
Action *LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
LA = OffloadBuilder.processHostLinkAction(LA);
Actions.push_back(LA);
}
// If we are linking, claim any options which are obviously only used for
// compilation.
if (FinalPhase == phases::Link && PL.size() == 1) {
Args.ClaimAllArgs(options::OPT_CompileOnly_Group);
Args.ClaimAllArgs(options::OPT_cl_compile_Group);
}
// Claim ignored clang-cl options.
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
// Claim --cuda-host-only and --cuda-compile-host-device, which may be passed
// to non-CUDA compilations and should not trigger warnings there.
Args.ClaimAllArgs(options::OPT_cuda_host_only);
Args.ClaimAllArgs(options::OPT_cuda_compile_host_device);
}
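// As a sketch of the above: a plain `clang -c foo.c` walks the phase list
// Preprocess -> Compile -> Backend -> Assemble for its single input and
// stops before Link, so no LinkJobAction is created.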
Action *Driver::ConstructPhaseAction(
Compilation &C, const ArgList &Args, phases::ID Phase, Action *Input,
Action::OffloadKind TargetDeviceOffloadKind) const {
llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
// Some types skip the assembler phase (e.g., llvm-bc), but we can't
// encode this in the steps because the intermediate type depends on
// arguments. Just special-case it here.
if (Phase == phases::Assemble && Input->getType() != types::TY_PP_Asm)
return Input;
// Build the appropriate action.
switch (Phase) {
case phases::Link:
llvm_unreachable("link action invalid here.");
case phases::Preprocess: {
types::ID OutputTy;
// -{M, MM} alter the output type.
if (Args.hasArg(options::OPT_M, options::OPT_MM)) {
OutputTy = types::TY_Dependencies;
} else {
OutputTy = Input->getType();
if (!Args.hasFlag(options::OPT_frewrite_includes,
options::OPT_fno_rewrite_includes, false) &&
!Args.hasFlag(options::OPT_frewrite_imports,
options::OPT_fno_rewrite_imports, false) &&
!CCGenDiagnostics)
OutputTy = types::getPreprocessedType(OutputTy);
assert(OutputTy != types::TY_INVALID &&
"Cannot preprocess this input type!");
}
return C.MakeAction<PreprocessJobAction>(Input, OutputTy);
}
case phases::Precompile: {
types::ID OutputTy = getPrecompiledType(Input->getType());
assert(OutputTy != types::TY_INVALID &&
"Cannot precompile this input type!");
if (Args.hasArg(options::OPT_fsyntax_only)) {
// Syntax checks should not emit a PCH file
OutputTy = types::TY_Nothing;
}
return C.MakeAction<PrecompileJobAction>(Input, OutputTy);
}
case phases::Compile: {
if (Args.hasArg(options::OPT_fsyntax_only))
return C.MakeAction<CompileJobAction>(Input, types::TY_Nothing);
if (Args.hasArg(options::OPT_rewrite_objc))
return C.MakeAction<CompileJobAction>(Input, types::TY_RewrittenObjC);
if (Args.hasArg(options::OPT_rewrite_legacy_objc))
return C.MakeAction<CompileJobAction>(Input,
types::TY_RewrittenLegacyObjC);
if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto))
return C.MakeAction<AnalyzeJobAction>(Input, types::TY_Plist);
if (Args.hasArg(options::OPT__migrate))
return C.MakeAction<MigrateJobAction>(Input, types::TY_Remap);
if (Args.hasArg(options::OPT_emit_ast))
return C.MakeAction<CompileJobAction>(Input, types::TY_AST);
if (Args.hasArg(options::OPT_module_file_info))
return C.MakeAction<CompileJobAction>(Input, types::TY_ModuleFile);
if (Args.hasArg(options::OPT_verify_pch))
return C.MakeAction<VerifyPCHJobAction>(Input, types::TY_Nothing);
return C.MakeAction<CompileJobAction>(Input, types::TY_LLVM_BC);
}
case phases::Backend: {
if (isUsingLTO() && TargetDeviceOffloadKind == Action::OFK_None) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
if (Args.hasArg(options::OPT_emit_llvm)) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LLVM_IR : types::TY_LLVM_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
return C.MakeAction<BackendJobAction>(Input, types::TY_PP_Asm);
}
case phases::Assemble:
return C.MakeAction<AssembleJobAction>(std::move(Input), types::TY_Object);
}
llvm_unreachable("invalid phase in ConstructPhaseAction");
}
void Driver::BuildJobs(Compilation &C) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
// It is an error to provide a -o option if we are making multiple output
// files.
if (FinalOutput) {
unsigned NumOutputs = 0;
for (const Action *A : C.getActions())
if (A->getType() != types::TY_Nothing)
++NumOutputs;
if (NumOutputs > 1) {
Diag(clang::diag::err_drv_output_argument_with_multiple_files);
FinalOutput = nullptr;
}
}
// Collect the list of architectures.
llvm::StringSet<> ArchNames;
if (C.getDefaultToolChain().getTriple().isOSBinFormatMachO())
for (const Arg *A : C.getArgs())
if (A->getOption().matches(options::OPT_arch))
ArchNames.insert(A->getValue());
// Set of (Action, canonical ToolChain triple) pairs we've built jobs for.
std::map<std::pair<const Action *, std::string>, InputInfo> CachedResults;
for (Action *A : C.getActions()) {
// If we are linking an image for multiple archs then the linker wants
// -arch_multiple and -final_output <final image name>. Unfortunately, this
// doesn't fit in cleanly because we have to pass this information down.
//
// FIXME: This is a hack; find a cleaner way to integrate this into the
// process.
const char *LinkingOutput = nullptr;
if (isa<LipoJobAction>(A)) {
if (FinalOutput)
LinkingOutput = FinalOutput->getValue();
else
LinkingOutput = getDefaultImageName();
}
BuildJobsForAction(C, A, &C.getDefaultToolChain(),
/*BoundArch*/ StringRef(),
/*AtTopLevel*/ true,
/*MultipleArchs*/ ArchNames.size() > 1,
/*LinkingOutput*/ LinkingOutput, CachedResults,
/*TargetDeviceOffloadKind*/ Action::OFK_None);
}
// If the user passed -Qunused-arguments or there were errors, don't warn
// about any unused arguments.
if (Diags.hasErrorOccurred() ||
C.getArgs().hasArg(options::OPT_Qunused_arguments))
return;
// Claim -### here.
(void)C.getArgs().hasArg(options::OPT__HASH_HASH_HASH);
// Claim --driver-mode and --rsp-quoting; they were handled earlier.
(void)C.getArgs().hasArg(options::OPT_driver_mode);
(void)C.getArgs().hasArg(options::OPT_rsp_quoting);
for (Arg *A : C.getArgs()) {
// FIXME: It would be nice to be able to send the argument to the
// DiagnosticsEngine, so that extra values, position, and so on could be
// printed.
if (!A->isClaimed()) {
if (A->getOption().hasFlag(options::NoArgumentUnused))
continue;
// Suppress the warning automatically if this is just a flag, and it is an
// instance of an argument we already claimed.
const Option &Opt = A->getOption();
if (Opt.getKind() == Option::FlagClass) {
bool DuplicateClaimed = false;
for (const Arg *AA : C.getArgs().filtered(&Opt)) {
if (AA->isClaimed()) {
DuplicateClaimed = true;
break;
}
}
if (DuplicateClaimed)
continue;
}
// In clang-cl, don't mention unknown arguments here since they have
// already been warned about.
if (!IsCLMode() || !A->getOption().matches(options::OPT_UNKNOWN))
Diag(clang::diag::warn_drv_unused_argument)
<< A->getAsString(C.getArgs());
}
}
}
namespace {
/// Utility class to control the collapse of dependent actions and select the
/// tools accordingly.
class ToolSelector final {
/// The tool chain this selector refers to.
const ToolChain &TC;
/// The compilation this selector refers to.
const Compilation &C;
/// The base action this selector refers to.
const JobAction *BaseAction;
/// Set to true if the current toolchain refers to host actions.
bool IsHostSelector;
/// Set to true if save-temps and embed-bitcode functionalities are active.
bool SaveTemps;
bool EmbedBitcode;
/// Get the previous dependent action, or null if there is none. If
/// \a CanBeCollapsed is true, that action must be legal to collapse, or
/// null will be returned.
const JobAction *getPrevDependentAction(const ActionList &Inputs,
ActionList &SavedOffloadAction,
bool CanBeCollapsed = true) {
// An action can be collapsed only if it has a single input.
if (Inputs.size() != 1)
return nullptr;
Action *CurAction = *Inputs.begin();
if (CanBeCollapsed &&
!CurAction->isCollapsingWithNextDependentActionLegal())
return nullptr;
// If the input action is an offload action, look through it and save any
// offload action that can be dropped in the event of a collapse.
if (auto *OA = dyn_cast<OffloadAction>(CurAction)) {
// If the dependent action is a device action, we will attempt to collapse
// only with other device actions. Otherwise, we would do the same but
// with host actions only.
if (!IsHostSelector) {
if (OA->hasSingleDeviceDependence(/*DoNotConsiderHostActions=*/true)) {
CurAction =
OA->getSingleDeviceDependence(/*DoNotConsiderHostActions=*/true);
if (CanBeCollapsed &&
!CurAction->isCollapsingWithNextDependentActionLegal())
return nullptr;
SavedOffloadAction.push_back(OA);
return dyn_cast<JobAction>(CurAction);
}
} else if (OA->hasHostDependence()) {
CurAction = OA->getHostDependence();
if (CanBeCollapsed &&
!CurAction->isCollapsingWithNextDependentActionLegal())
return nullptr;
SavedOffloadAction.push_back(OA);
return dyn_cast<JobAction>(CurAction);
}
return nullptr;
}
return dyn_cast<JobAction>(CurAction);
}
/// Return true if an assemble action can be collapsed.
bool canCollapseAssembleAction() const {
return TC.useIntegratedAs() && !SaveTemps &&
!C.getArgs().hasArg(options::OPT_via_file_asm) &&
!C.getArgs().hasArg(options::OPT__SLASH_FA) &&
!C.getArgs().hasArg(options::OPT__SLASH_Fa);
}
/// Return true if a preprocessor action can be collapsed.
bool canCollapsePreprocessorAction() const {
return !C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
!C.getArgs().hasArg(options::OPT_traditional_cpp) && !SaveTemps &&
!C.getArgs().hasArg(options::OPT_rewrite_objc);
}
/// Struct that relates an action with the offload actions that would be
/// collapsed with it.
struct JobActionInfo final {
/// The action this info refers to.
const JobAction *JA = nullptr;
/// The offload actions we need to take care of if this action is
/// collapsed.
ActionList SavedOffloadAction;
};
/// Append collapsed offload actions from the given number of elements in the
/// action info array.
static void AppendCollapsedOffloadAction(ActionList &CollapsedOffloadAction,
ArrayRef<JobActionInfo> &ActionInfo,
unsigned ElementNum) {
assert(ElementNum <= ActionInfo.size() && "Invalid number of elements.");
for (unsigned I = 0; I < ElementNum; ++I)
CollapsedOffloadAction.append(ActionInfo[I].SavedOffloadAction.begin(),
ActionInfo[I].SavedOffloadAction.end());
}
/// Functions that attempt to perform the combining. They detect if that is
/// legal, and if so they update the inputs \a Inputs and the offload actions
/// that were collapsed in \a CollapsedOffloadAction. A tool that deals with
/// the combined action is returned. If the combining is not legal or if the
/// tool does not exist, null is returned.
/// Currently three kinds of collapsing are supported:
/// - Assemble + Backend + Compile;
/// - Assemble + Backend;
/// - Backend + Compile.
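/// For example, a default `clang -c foo.c` with the integrated assembler can
/// collapse Assemble + Backend + Compile into a single clang job, whereas
/// -save-temps typically keeps the stages separate.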
const Tool *
combineAssembleBackendCompile(ArrayRef<JobActionInfo> ActionInfo,
const ActionList *&Inputs,
ActionList &CollapsedOffloadAction) {
if (ActionInfo.size() < 3 || !canCollapseAssembleAction())
return nullptr;
auto *AJ = dyn_cast<AssembleJobAction>(ActionInfo[0].JA);
auto *BJ = dyn_cast<BackendJobAction>(ActionInfo[1].JA);
auto *CJ = dyn_cast<CompileJobAction>(ActionInfo[2].JA);
if (!AJ || !BJ || !CJ)
return nullptr;
// Get compiler tool.
const Tool *T = TC.SelectTool(*CJ);
if (!T)
return nullptr;
// When using -fembed-bitcode, the same tool (clang) is expected for both
// CompilerJA and BackendJA, and the two must stay separate; do not
// combine them in that case. Otherwise, the two stages can be combined.
if (EmbedBitcode) {
const Tool *BT = TC.SelectTool(*BJ);
if (BT == T)
return nullptr;
}
if (!T->hasIntegratedAssembler())
return nullptr;
Inputs = &CJ->getInputs();
AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
/*NumElements=*/3);
return T;
}
const Tool *combineAssembleBackend(ArrayRef<JobActionInfo> ActionInfo,
const ActionList *&Inputs,
ActionList &CollapsedOffloadAction) {
if (ActionInfo.size() < 2 || !canCollapseAssembleAction())
return nullptr;
auto *AJ = dyn_cast<AssembleJobAction>(ActionInfo[0].JA);
auto *BJ = dyn_cast<BackendJobAction>(ActionInfo[1].JA);
if (!AJ || !BJ)
return nullptr;
// Retrieve the compile job; a backend action is always preceded by one.
ActionList CompileJobOffloadActions;
auto *CJ = getPrevDependentAction(BJ->getInputs(), CompileJobOffloadActions,
/*CanBeCollapsed=*/false);
if (!AJ || !BJ || !CJ)
return nullptr;
assert(isa<CompileJobAction>(CJ) &&
"Expecting compile job preceding backend job.");
// Get compiler tool.
const Tool *T = TC.SelectTool(*CJ);
if (!T)
return nullptr;
if (!T->hasIntegratedAssembler())
return nullptr;
Inputs = &BJ->getInputs();
AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
/*NumElements=*/2);
return T;
}
const Tool *combineBackendCompile(ArrayRef<JobActionInfo> ActionInfo,
const ActionList *&Inputs,
ActionList &CollapsedOffloadAction) {
if (ActionInfo.size() < 2)
return nullptr;
auto *BJ = dyn_cast<BackendJobAction>(ActionInfo[0].JA);
auto *CJ = dyn_cast<CompileJobAction>(ActionInfo[1].JA);
if (!BJ || !CJ)
return nullptr;
// Check if the initial input (to the compile job or its predecessor if one
// exists) is LLVM bitcode. In that case, no preprocessor step is required
// and we can still collapse the compile and backend jobs when we have
// -save-temps. I.e. there is no need for a separate compile job just to
// emit unoptimized bitcode.
bool InputIsBitcode = true;
for (size_t i = 1; i < ActionInfo.size(); i++)
if (ActionInfo[i].JA->getType() != types::TY_LLVM_BC &&
ActionInfo[i].JA->getType() != types::TY_LTO_BC) {
InputIsBitcode = false;
break;
}
if (!InputIsBitcode && !canCollapsePreprocessorAction())
return nullptr;
// Get compiler tool.
const Tool *T = TC.SelectTool(*CJ);
if (!T)
return nullptr;
if (T->canEmitIR() && ((SaveTemps && !InputIsBitcode) || EmbedBitcode))
return nullptr;
Inputs = &CJ->getInputs();
AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
/*NumElements=*/2);
return T;
}
/// Updates the inputs if the obtained tool supports combining with
/// preprocessor action, and the current input is indeed a preprocessor
/// action. If combining results in the collapse of offloading actions, those
/// are appended to \a CollapsedOffloadAction.
void combineWithPreprocessor(const Tool *T, const ActionList *&Inputs,
ActionList &CollapsedOffloadAction) {
if (!T || !canCollapsePreprocessorAction() || !T->hasIntegratedCPP())
return;
// Attempt to get a preprocessor action dependence.
ActionList PreprocessJobOffloadActions;
auto *PJ = getPrevDependentAction(*Inputs, PreprocessJobOffloadActions);
if (!PJ || !isa<PreprocessJobAction>(PJ))
return;
// This is legal to combine. Append any offload action we found and set the
// current inputs to preprocessor inputs.
CollapsedOffloadAction.append(PreprocessJobOffloadActions.begin(),
PreprocessJobOffloadActions.end());
Inputs = &PJ->getInputs();
}
public:
ToolSelector(const JobAction *BaseAction, const ToolChain &TC,
const Compilation &C, bool SaveTemps, bool EmbedBitcode)
: TC(TC), C(C), BaseAction(BaseAction), SaveTemps(SaveTemps),
EmbedBitcode(EmbedBitcode) {
assert(BaseAction && "Invalid base action.");
IsHostSelector = BaseAction->getOffloadingDeviceKind() == Action::OFK_None;
}
/// Check if a chain of actions can be combined and return the tool that can
/// handle the combination of actions. The pointer to the current inputs \a
/// Inputs and the list of offload actions \a CollapsedOffloadActions
/// connected to collapsed actions are updated accordingly. The latter enables
/// the caller of the selector to process them afterwards instead of just
/// dropping them. If no suitable tool is found, null will be returned.
const Tool *getTool(const ActionList *&Inputs,
ActionList &CollapsedOffloadAction) {
//
// Get the largest chain of actions that we could combine.
//
SmallVector<JobActionInfo, 5> ActionChain(1);
ActionChain.back().JA = BaseAction;
while (ActionChain.back().JA) {
const Action *CurAction = ActionChain.back().JA;
// Grow the chain by one element.
ActionChain.resize(ActionChain.size() + 1);
JobActionInfo &AI = ActionChain.back();
// Attempt to fill it with the previous dependent action.
AI.JA =
getPrevDependentAction(CurAction->getInputs(), AI.SavedOffloadAction);
}
// Pop the last action info as it could not be filled.
ActionChain.pop_back();
//
// Attempt to combine actions. If all combining attempts failed, just return
// the tool of the provided action. At the end we attempt to combine the
// action with any preprocessor action it may depend on.
//
const Tool *T = combineAssembleBackendCompile(ActionChain, Inputs,
CollapsedOffloadAction);
if (!T)
T = combineAssembleBackend(ActionChain, Inputs, CollapsedOffloadAction);
if (!T)
T = combineBackendCompile(ActionChain, Inputs, CollapsedOffloadAction);
if (!T) {
Inputs = &BaseAction->getInputs();
T = TC.SelectTool(*BaseAction);
}
combineWithPreprocessor(T, Inputs, CollapsedOffloadAction);
return T;
}
};
} // anonymous namespace.
/// Return a string that uniquely identifies the result of a job. The bound arch
/// is not necessarily represented in the toolchain's triple -- for example,
/// armv7 and armv7s both map to the same triple -- so we need both in our map.
/// Also, we need to add the offloading device kind, as the same tool chain can
/// be used for host and device for some programming models, e.g. OpenMP.
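/// E.g. a CUDA device action bound to sm_35 would map to something like
/// "nvptx64-nvidia-cuda-sm_35-cuda" (normalized triple, bound arch, and
/// offload kind name, as computed below).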
static std::string GetTriplePlusArchString(const ToolChain *TC,
StringRef BoundArch,
Action::OffloadKind OffloadKind) {
std::string TriplePlusArch = TC->getTriple().normalize();
if (!BoundArch.empty()) {
TriplePlusArch += "-";
TriplePlusArch += BoundArch;
}
TriplePlusArch += "-";
TriplePlusArch += Action::GetOffloadKindName(OffloadKind);
return TriplePlusArch;
}
InputInfo Driver::BuildJobsForAction(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const {
std::pair<const Action *, std::string> ActionTC = {
A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
auto CachedResult = CachedResults.find(ActionTC);
if (CachedResult != CachedResults.end()) {
return CachedResult->second;
}
InputInfo Result = BuildJobsForActionNoCache(
C, A, TC, BoundArch, AtTopLevel, MultipleArchs, LinkingOutput,
CachedResults, TargetDeviceOffloadKind);
CachedResults[ActionTC] = Result;
return Result;
}
InputInfo Driver::BuildJobsForActionNoCache(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
InputInfoList OffloadDependencesInputInfo;
bool BuildingForOffloadDevice = TargetDeviceOffloadKind != Action::OFK_None;
if (const OffloadAction *OA = dyn_cast<OffloadAction>(A)) {
// The 'Darwin' toolchain is initialized only when its arguments are
// computed. Get the default arguments for OFK_None to ensure that
// initialization is performed before processing the offload action.
// FIXME: Remove when darwin's toolchain is initialized during construction.
C.getArgsForToolChain(TC, BoundArch, Action::OFK_None);
// The offload action is expected to be used in four different situations.
//
// a) Set a toolchain/architecture/kind for a host action:
//    Host Action 1 -> OffloadAction -> Host Action 2
//
// b) Set a toolchain/architecture/kind for a device action:
//    Device Action 1 -> OffloadAction -> Device Action 2
//
// c) Specify a device dependence to a host action:
//    Device Action 1  _
//                      \
//    Host Action 1  ---> OffloadAction -> Host Action 2
//
// d) Specify a host dependence to a device action:
//    Host Action 1  _
//                    \
//    Device Action 1 ---> OffloadAction -> Device Action 2
//
// For a) and b), we just return the job generated for the dependence. For
// c) and d) we override the current action with the host/device dependence
// if the current toolchain is host/device and set the offload dependences
// info with the jobs obtained from the device/host dependence(s).
// If there is a single device dependence, just generate the job for it.
if (OA->hasSingleDeviceDependence()) {
InputInfo DevA;
OA->doOnEachDeviceDependence([&](Action *DepA, const ToolChain *DepTC,
const char *DepBoundArch) {
DevA =
BuildJobsForAction(C, DepA, DepTC, DepBoundArch, AtTopLevel,
/*MultipleArchs*/ !!DepBoundArch, LinkingOutput,
CachedResults, DepA->getOffloadingDeviceKind());
});
return DevA;
}
// If 'Action 2' is host, we generate jobs for the device dependences and
// override the current action with the host dependence. Otherwise, we
// generate the host dependences and override the action with the device
// dependence. The dependences therefore can't be top-level actions.
OA->doOnEachDependence(
/*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
OffloadDependencesInputInfo.push_back(BuildJobsForAction(
C, DepA, DepTC, DepBoundArch, /*AtTopLevel=*/false,
/*MultipleArchs*/ !!DepBoundArch, LinkingOutput, CachedResults,
DepA->getOffloadingDeviceKind()));
});
A = BuildingForOffloadDevice
? OA->getSingleDeviceDependence(/*DoNotConsiderHostActions=*/true)
: OA->getHostDependence();
}
if (const InputAction *IA = dyn_cast<InputAction>(A)) {
// FIXME: It would be nice to not claim this here; maybe the old scheme of
// just using Args was better?
const Arg &Input = IA->getInputArg();
Input.claim();
if (Input.getOption().matches(options::OPT_INPUT)) {
const char *Name = Input.getValue();
return InputInfo(A, Name, /* BaseInput = */ Name);
}
return InputInfo(A, &Input, /* BaseInput = */ "");
}
if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
const ToolChain *TC;
StringRef ArchName = BAA->getArchName();
if (!ArchName.empty())
TC = &getToolChain(C.getArgs(),
computeTargetTriple(*this, TargetTriple,
C.getArgs(), ArchName));
else
TC = &C.getDefaultToolChain();
return BuildJobsForAction(C, *BAA->input_begin(), TC, ArchName, AtTopLevel,
MultipleArchs, LinkingOutput, CachedResults,
TargetDeviceOffloadKind);
}
const ActionList *Inputs = &A->getInputs();
const JobAction *JA = cast<JobAction>(A);
ActionList CollapsedOffloadActions;
ToolSelector TS(JA, *TC, C, isSaveTempsEnabled(),
embedBitcodeInObject() && !isUsingLTO());
const Tool *T = TS.getTool(Inputs, CollapsedOffloadActions);
if (!T)
return InputInfo();
// If we've collapsed an action list that contained OffloadActions, we
// need to build jobs for the host/device-side inputs they may have held.
for (const auto *OA : CollapsedOffloadActions)
cast<OffloadAction>(OA)->doOnEachDependence(
/*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
OffloadDependencesInputInfo.push_back(BuildJobsForAction(
C, DepA, DepTC, DepBoundArch, /* AtTopLevel */ false,
/*MultipleArchs=*/!!DepBoundArch, LinkingOutput, CachedResults,
DepA->getOffloadingDeviceKind()));
});
// Only use pipes when there is exactly one input.
InputInfoList InputInfos;
for (const Action *Input : *Inputs) {
// Treat dsymutil and verify sub-jobs as being at the top level too; they
// shouldn't get temporary output names.
// FIXME: Clean this up.
bool SubJobAtTopLevel =
AtTopLevel && (isa<DsymutilJobAction>(A) || isa<VerifyJobAction>(A));
InputInfos.push_back(BuildJobsForAction(
C, Input, TC, BoundArch, SubJobAtTopLevel, MultipleArchs, LinkingOutput,
CachedResults, A->getOffloadingDeviceKind()));
}
// Always use the first input as the base input.
const char *BaseInput = InputInfos[0].getBaseInput();
// ... except dsymutil actions, which use their actual input as the base
// input.
if (JA->getType() == types::TY_dSYM)
BaseInput = InputInfos[0].getFilename();
// Append outputs of offload device jobs to the input list
if (!OffloadDependencesInputInfo.empty())
InputInfos.append(OffloadDependencesInputInfo.begin(),
OffloadDependencesInputInfo.end());
// Set the effective triple of the toolchain for the duration of this job.
llvm::Triple EffectiveTriple;
const ToolChain &ToolTC = T->getToolChain();
const ArgList &Args =
C.getArgsForToolChain(TC, BoundArch, A->getOffloadingDeviceKind());
if (InputInfos.size() != 1) {
EffectiveTriple = llvm::Triple(ToolTC.ComputeEffectiveClangTriple(Args));
} else {
// Pass along the input type if it can be unambiguously determined.
EffectiveTriple = llvm::Triple(
ToolTC.ComputeEffectiveClangTriple(Args, InputInfos[0].getType()));
}
RegisterEffectiveTriple TripleRAII(ToolTC, EffectiveTriple);
// Determine the place to write output to, if any.
InputInfo Result;
InputInfoList UnbundlingResults;
if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(JA)) {
// If we have an unbundling job, we need to create results for all the
// outputs. We also update the results cache so that other actions using
// this unbundling action can get the right results.
for (auto &UI : UA->getDependentActionsInfo()) {
assert(UI.DependentOffloadKind != Action::OFK_None &&
"Unbundling with no offloading??");
// Unbundling actions are never at the top level. When we generate the
// offloading prefix, we also do that for the host file because the
// unbundling action does not change the type of the output, which could
// cause an overwrite.
std::string OffloadingPrefix = Action::GetOffloadingFileNamePrefix(
UI.DependentOffloadKind,
UI.DependentToolChain->getTriple().normalize(),
/*CreatePrefixForHost=*/true);
auto CurI = InputInfo(
UA,
GetNamedOutputPath(C, *UA, BaseInput, UI.DependentBoundArch,
/*AtTopLevel=*/false,
MultipleArchs ||
UI.DependentOffloadKind == Action::OFK_HIP,
OffloadingPrefix),
BaseInput);
// Save the unbundling result.
UnbundlingResults.push_back(CurI);
// Get the unique string identifier for this dependence and cache the
// result.
StringRef Arch;
if (TargetDeviceOffloadKind == Action::OFK_HIP) {
if (UI.DependentOffloadKind == Action::OFK_Host)
Arch = StringRef();
else
Arch = UI.DependentBoundArch;
} else
Arch = BoundArch;
CachedResults[{A, GetTriplePlusArchString(UI.DependentToolChain, Arch,
UI.DependentOffloadKind)}] =
CurI;
}
// Now that we have all the results generated, select the one that should be
// returned for the current dependent action.
std::pair<const Action *, std::string> ActionTC = {
A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
assert(CachedResults.find(ActionTC) != CachedResults.end() &&
"Result does not exist??");
Result = CachedResults[ActionTC];
} else if (JA->getType() == types::TY_Nothing)
Result = InputInfo(A, BaseInput);
else {
// We only have to generate a prefix for the host if this is not a top-level
// action.
std::string OffloadingPrefix = Action::GetOffloadingFileNamePrefix(
A->getOffloadingDeviceKind(), TC->getTriple().normalize(),
/*CreatePrefixForHost=*/!!A->getOffloadingHostActiveKinds() &&
!AtTopLevel);
Result = InputInfo(A, GetNamedOutputPath(C, *JA, BaseInput, BoundArch,
AtTopLevel, MultipleArchs,
OffloadingPrefix),
BaseInput);
}
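// With -ccc-print-bindings, the block below prints lines of the form
// (illustrative values):
// # "x86_64-unknown-freebsd12.0" - "clang", inputs: ["foo.c"], output: "foo.o"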
if (CCCPrintBindings && !CCGenDiagnostics) {
llvm::errs() << "# \"" << T->getToolChain().getTripleString() << '"'
<< " - \"" << T->getName() << "\", inputs: [";
for (unsigned i = 0, e = InputInfos.size(); i != e; ++i) {
llvm::errs() << InputInfos[i].getAsString();
if (i + 1 != e)
llvm::errs() << ", ";
}
if (UnbundlingResults.empty())
llvm::errs() << "], output: " << Result.getAsString() << "\n";
else {
llvm::errs() << "], outputs: [";
for (unsigned i = 0, e = UnbundlingResults.size(); i != e; ++i) {
llvm::errs() << UnbundlingResults[i].getAsString();
if (i + 1 != e)
llvm::errs() << ", ";
}
llvm::errs() << "] \n";
}
} else {
if (UnbundlingResults.empty())
T->ConstructJob(
C, *JA, Result, InputInfos,
C.getArgsForToolChain(TC, BoundArch, JA->getOffloadingDeviceKind()),
LinkingOutput);
else
T->ConstructJobMultipleOutputs(
C, *JA, UnbundlingResults, InputInfos,
C.getArgsForToolChain(TC, BoundArch, JA->getOffloadingDeviceKind()),
LinkingOutput);
}
return Result;
}
const char *Driver::getDefaultImageName() const {
llvm::Triple Target(llvm::Triple::normalize(TargetTriple));
return Target.isOSWindows() ? "a.exe" : "a.out";
}
/// Create output filename based on ArgValue, which could either be a
/// full filename, filename without extension, or a directory. If ArgValue
/// does not provide a filename, then use BaseName, and use the extension
/// suitable for FileType.
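/// E.g. (illustrative values) an empty ArgValue with BaseName "foo.cpp" and
/// FileType TY_Object yields "foo.obj", while ArgValue "out\" yields
/// "out\foo.obj".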
static const char *MakeCLOutputFilename(const ArgList &Args, StringRef ArgValue,
StringRef BaseName,
types::ID FileType) {
SmallString<128> Filename = ArgValue;
if (ArgValue.empty()) {
// If the argument is empty, output to BaseName in the current dir.
Filename = BaseName;
} else if (llvm::sys::path::is_separator(Filename.back())) {
// If the argument is a directory, output to BaseName in that dir.
llvm::sys::path::append(Filename, BaseName);
}
if (!llvm::sys::path::has_extension(ArgValue)) {
// If the argument didn't provide an extension, then set it.
const char *Extension = types::getTypeTempSuffix(FileType, true);
if (FileType == types::TY_Image &&
Args.hasArg(options::OPT__SLASH_LD, options::OPT__SLASH_LDd)) {
// The output file is a dll.
Extension = "dll";
}
llvm::sys::path::replace_extension(Filename, Extension);
}
return Args.MakeArgString(Filename.c_str());
}
const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
const char *BaseInput,
StringRef BoundArch, bool AtTopLevel,
bool MultipleArchs,
StringRef OffloadingPrefix) const {
llvm::PrettyStackTraceString CrashInfo("Computing output path");
// Output to a user requested destination?
if (AtTopLevel && !isa<DsymutilJobAction>(JA) && !isa<VerifyJobAction>(JA)) {
if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
return C.addResultFile(FinalOutput->getValue(), &JA);
}
// For /P, preprocess to file named after BaseInput.
if (C.getArgs().hasArg(options::OPT__SLASH_P)) {
assert(AtTopLevel && isa<PreprocessJobAction>(JA));
StringRef BaseName = llvm::sys::path::filename(BaseInput);
StringRef NameArg;
if (Arg *A = C.getArgs().getLastArg(options::OPT__SLASH_Fi))
NameArg = A->getValue();
return C.addResultFile(
MakeCLOutputFilename(C.getArgs(), NameArg, BaseName, types::TY_PP_C),
&JA);
}
// Default to writing to stdout?
if (AtTopLevel && !CCGenDiagnostics && isa<PreprocessJobAction>(JA))
return "-";
// Is this the assembly listing for /FA?
if (JA.getType() == types::TY_PP_Asm &&
(C.getArgs().hasArg(options::OPT__SLASH_FA) ||
C.getArgs().hasArg(options::OPT__SLASH_Fa))) {
// Use /Fa and the input filename to determine the asm file name.
StringRef BaseName = llvm::sys::path::filename(BaseInput);
StringRef FaValue = C.getArgs().getLastArgValue(options::OPT__SLASH_Fa);
return C.addResultFile(
MakeCLOutputFilename(C.getArgs(), FaValue, BaseName, JA.getType()),
&JA);
}
// Output to a temporary file?
if ((!AtTopLevel && !isSaveTempsEnabled() &&
!C.getArgs().hasArg(options::OPT__SLASH_Fo)) ||
CCGenDiagnostics) {
StringRef Name = llvm::sys::path::filename(BaseInput);
std::pair<StringRef, StringRef> Split = Name.split('.');
SmallString<128> TmpName;
const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
Arg *A = C.getArgs().getLastArg(options::OPT_fcrash_diagnostics_dir);
if (CCGenDiagnostics && A) {
SmallString<128> CrashDirectory(A->getValue());
llvm::sys::path::append(CrashDirectory, Split.first);
const char *Middle = Suffix ? "-%%%%%%." : "-%%%%%%";
std::error_code EC =
llvm::sys::fs::createUniqueFile(CrashDirectory + Middle + Suffix, TmpName);
if (EC) {
Diag(clang::diag::err_unable_to_make_temp) << EC.message();
return "";
}
} else {
TmpName = GetTemporaryPath(Split.first, Suffix);
}
return C.addTempFile(C.getArgs().MakeArgString(TmpName));
}
SmallString<128> BasePath(BaseInput);
StringRef BaseName;
// Dsymutil actions should use the full path.
if (isa<DsymutilJobAction>(JA) || isa<VerifyJobAction>(JA))
BaseName = BasePath;
else
BaseName = llvm::sys::path::filename(BasePath);
// Determine what the derived output name should be.
const char *NamedOutput;
if ((JA.getType() == types::TY_Object || JA.getType() == types::TY_LTO_BC) &&
C.getArgs().hasArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)) {
// The /Fo or /o flag decides the object filename.
StringRef Val =
C.getArgs()
.getLastArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)
->getValue();
NamedOutput =
MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Object);
} else if (JA.getType() == types::TY_Image &&
C.getArgs().hasArg(options::OPT__SLASH_Fe,
options::OPT__SLASH_o)) {
// The /Fe or /o flag names the linked file.
StringRef Val =
C.getArgs()
.getLastArg(options::OPT__SLASH_Fe, options::OPT__SLASH_o)
->getValue();
NamedOutput =
MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Image);
} else if (JA.getType() == types::TY_Image) {
if (IsCLMode()) {
// clang-cl uses BaseName for the executable name.
NamedOutput =
MakeCLOutputFilename(C.getArgs(), "", BaseName, types::TY_Image);
} else {
SmallString<128> Output(getDefaultImageName());
Output += OffloadingPrefix;
if (MultipleArchs && !BoundArch.empty()) {
Output += "-";
Output.append(BoundArch);
}
NamedOutput = C.getArgs().MakeArgString(Output.c_str());
}
} else if (JA.getType() == types::TY_PCH && IsCLMode()) {
NamedOutput = C.getArgs().MakeArgString(GetClPchPath(C, BaseName));
} else {
const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
assert(Suffix && "All types used for output should have a suffix.");
std::string::size_type End = std::string::npos;
if (!types::appendSuffixForType(JA.getType()))
End = BaseName.rfind('.');
SmallString<128> Suffixed(BaseName.substr(0, End));
Suffixed += OffloadingPrefix;
if (MultipleArchs && !BoundArch.empty()) {
Suffixed += "-";
Suffixed.append(BoundArch);
}
// When using both -save-temps and -emit-llvm, use a ".tmp.bc" suffix for
// the unoptimized bitcode so that it does not get overwritten by the ".bc"
// optimized bitcode output.
if (!AtTopLevel && C.getArgs().hasArg(options::OPT_emit_llvm) &&
JA.getType() == types::TY_LLVM_BC)
Suffixed += ".tmp";
Suffixed += '.';
Suffixed += Suffix;
NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
}
// Prepend object file path if -save-temps=obj
if (!AtTopLevel && isSaveTempsObj() && C.getArgs().hasArg(options::OPT_o) &&
JA.getType() != types::TY_PCH) {
Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
SmallString<128> TempPath(FinalOutput->getValue());
llvm::sys::path::remove_filename(TempPath);
StringRef OutputFileName = llvm::sys::path::filename(NamedOutput);
llvm::sys::path::append(TempPath, OutputFileName);
NamedOutput = C.getArgs().MakeArgString(TempPath.c_str());
}
// If we're saving temps and the temp file conflicts with the input file,
// then avoid overwriting the input file.
if (!AtTopLevel && isSaveTempsEnabled() && NamedOutput == BaseName) {
bool SameFile = false;
SmallString<256> Result;
llvm::sys::fs::current_path(Result);
llvm::sys::path::append(Result, BaseName);
llvm::sys::fs::equivalent(BaseInput, Result.c_str(), SameFile);
// Must share the same path to conflict.
if (SameFile) {
StringRef Name = llvm::sys::path::filename(BaseInput);
std::pair<StringRef, StringRef> Split = Name.split('.');
std::string TmpName = GetTemporaryPath(
Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
return C.addTempFile(C.getArgs().MakeArgString(TmpName));
}
}
// As an annoying special case, PCH generation doesn't strip the pathname.
if (JA.getType() == types::TY_PCH && !IsCLMode()) {
llvm::sys::path::remove_filename(BasePath);
if (BasePath.empty())
BasePath = NamedOutput;
else
llvm::sys::path::append(BasePath, NamedOutput);
return C.addResultFile(C.getArgs().MakeArgString(BasePath.c_str()), &JA);
} else {
return C.addResultFile(NamedOutput, &JA);
}
}
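// Behavior sketch (derived from the -save-temps=obj block above, not
// upstream code): for a hypothetical "clang -save-temps=obj -o build/foo foo.c",
// each non-top-level intermediate (foo.i, foo.s, ...) is redirected into
// build/, since the directory of the -o argument is prepended to the
// intermediate's filename.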
std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
// Respect a limited subset of the '-Bprefix' functionality in GCC by
// attempting to use this prefix when looking for file paths.
for (const std::string &Dir : PrefixDirs) {
if (Dir.empty())
continue;
SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
return P.str();
}
SmallString<128> R(ResourceDir);
llvm::sys::path::append(R, Name);
if (llvm::sys::fs::exists(Twine(R)))
return R.str();
SmallString<128> P(TC.getCompilerRTPath());
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
return P.str();
for (const std::string &Dir : TC.getFilePaths()) {
if (Dir.empty())
continue;
SmallString<128> P(Dir[0] == '=' ? SysRoot + Dir.substr(1) : Dir);
llvm::sys::path::append(P, Name);
if (llvm::sys::fs::exists(Twine(P)))
return P.str();
}
return Name;
}
void Driver::generatePrefixedToolNames(
StringRef Tool, const ToolChain &TC,
SmallVectorImpl<std::string> &Names) const {
// FIXME: Needs a better variable than TargetTriple
Names.emplace_back((TargetTriple + "-" + Tool).str());
Names.emplace_back(Tool);
// Allow the discovery of tools prefixed with LLVM's default target triple.
std::string DefaultTargetTriple = llvm::sys::getDefaultTargetTriple();
if (DefaultTargetTriple != TargetTriple)
Names.emplace_back((DefaultTargetTriple + "-" + Tool).str());
}
static bool ScanDirForExecutable(SmallString<128> &Dir,
ArrayRef<std::string> Names) {
for (const auto &Name : Names) {
llvm::sys::path::append(Dir, Name);
if (llvm::sys::fs::can_execute(Twine(Dir)))
return true;
llvm::sys::path::remove_filename(Dir);
}
return false;
}
std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
SmallVector<std::string, 2> TargetSpecificExecutables;
generatePrefixedToolNames(Name, TC, TargetSpecificExecutables);
// Respect a limited subset of the '-Bprefix' functionality in GCC by
// attempting to use this prefix when looking for program paths.
for (const auto &PrefixDir : PrefixDirs) {
if (llvm::sys::fs::is_directory(PrefixDir)) {
SmallString<128> P(PrefixDir);
if (ScanDirForExecutable(P, TargetSpecificExecutables))
return P.str();
} else {
SmallString<128> P((PrefixDir + Name).str());
if (llvm::sys::fs::can_execute(Twine(P)))
return P.str();
}
}
const ToolChain::path_list &List = TC.getProgramPaths();
for (const auto &Path : List) {
SmallString<128> P(Path);
if (ScanDirForExecutable(P, TargetSpecificExecutables))
return P.str();
}
// If all else failed, search the path.
for (const auto &TargetSpecificExecutable : TargetSpecificExecutables)
if (llvm::ErrorOr<std::string> P =
llvm::sys::findProgramByName(TargetSpecificExecutable))
return *P;
return Name;
}
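// Lookup-order sketch (an illustration, not upstream code): for a
// hypothetical x86_64-unknown-freebsd12.0 target and Name == "as",
// generatePrefixedToolNames yields the candidates
// "x86_64-unknown-freebsd12.0-as", "as", and (if LLVM's default triple
// differs) "<default-triple>-as"; these are tried in each -B prefix dir,
// then in the toolchain program paths, then on PATH, and the bare Name is
// returned if nothing is found.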
std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
SmallString<128> Path;
std::error_code EC = llvm::sys::fs::createTemporaryFile(Prefix, Suffix, Path);
if (EC) {
Diag(clang::diag::err_unable_to_make_temp) << EC.message();
return "";
}
return Path.str();
}
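// Usage sketch (assuming LLVM's usual temp-file naming):
// GetTemporaryPath("foo", "o") asks createTemporaryFile for a unique path
// such as /tmp/foo-1a2b3c.o, and on failure diagnoses
// err_unable_to_make_temp and returns "".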
std::string Driver::GetClPchPath(Compilation &C, StringRef BaseName) const {
SmallString<128> Output;
if (Arg *FpArg = C.getArgs().getLastArg(options::OPT__SLASH_Fp)) {
// FIXME: If anybody needs it, implement this obscure rule:
// "If you specify a directory without a file name, the default file name
// is VCx0.pch., where x is the major version of Visual C++ in use."
Output = FpArg->getValue();
// "If you do not specify an extension as part of the path name, an
// extension of .pch is assumed. "
if (!llvm::sys::path::has_extension(Output))
Output += ".pch";
} else if (Arg *YcArg = C.getArgs().getLastArg(options::OPT__SLASH_Yc)) {
Output = YcArg->getValue();
llvm::sys::path::replace_extension(Output, ".pch");
} else {
Output = BaseName;
llvm::sys::path::replace_extension(Output, ".pch");
}
return Output.str();
}
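// Naming sketch for clang-cl PCH output, as implemented above: an /Fp value
// wins, gaining a ".pch" extension only if it has none; otherwise the /Yc
// header (or, failing that, BaseName) has its extension replaced by ".pch".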
const ToolChain &Driver::getToolChain(const ArgList &Args,
const llvm::Triple &Target) const {
auto &TC = ToolChains[Target.str()];
if (!TC) {
switch (Target.getOS()) {
case llvm::Triple::Haiku:
TC = llvm::make_unique<toolchains::Haiku>(*this, Target, Args);
break;
case llvm::Triple::Ananas:
TC = llvm::make_unique<toolchains::Ananas>(*this, Target, Args);
break;
case llvm::Triple::CloudABI:
TC = llvm::make_unique<toolchains::CloudABI>(*this, Target, Args);
break;
case llvm::Triple::Darwin:
case llvm::Triple::MacOSX:
case llvm::Triple::IOS:
case llvm::Triple::TvOS:
case llvm::Triple::WatchOS:
TC = llvm::make_unique<toolchains::DarwinClang>(*this, Target, Args);
break;
case llvm::Triple::DragonFly:
TC = llvm::make_unique<toolchains::DragonFly>(*this, Target, Args);
break;
case llvm::Triple::OpenBSD:
TC = llvm::make_unique<toolchains::OpenBSD>(*this, Target, Args);
break;
case llvm::Triple::NetBSD:
TC = llvm::make_unique<toolchains::NetBSD>(*this, Target, Args);
break;
case llvm::Triple::FreeBSD:
TC = llvm::make_unique<toolchains::FreeBSD>(*this, Target, Args);
break;
case llvm::Triple::Minix:
TC = llvm::make_unique<toolchains::Minix>(*this, Target, Args);
break;
case llvm::Triple::Linux:
case llvm::Triple::ELFIAMCU:
if (Target.getArch() == llvm::Triple::hexagon)
TC = llvm::make_unique<toolchains::HexagonToolChain>(*this, Target,
Args);
else if ((Target.getVendor() == llvm::Triple::MipsTechnologies) &&
!Target.hasEnvironment())
TC = llvm::make_unique<toolchains::MipsLLVMToolChain>(*this, Target,
Args);
else
TC = llvm::make_unique<toolchains::Linux>(*this, Target, Args);
break;
case llvm::Triple::NaCl:
TC = llvm::make_unique<toolchains::NaClToolChain>(*this, Target, Args);
break;
case llvm::Triple::Fuchsia:
TC = llvm::make_unique<toolchains::Fuchsia>(*this, Target, Args);
break;
case llvm::Triple::Solaris:
TC = llvm::make_unique<toolchains::Solaris>(*this, Target, Args);
break;
case llvm::Triple::AMDHSA:
TC = llvm::make_unique<toolchains::AMDGPUToolChain>(*this, Target, Args);
break;
case llvm::Triple::Win32:
switch (Target.getEnvironment()) {
default:
if (Target.isOSBinFormatELF())
TC = llvm::make_unique<toolchains::Generic_ELF>(*this, Target, Args);
else if (Target.isOSBinFormatMachO())
TC = llvm::make_unique<toolchains::MachO>(*this, Target, Args);
else
TC = llvm::make_unique<toolchains::Generic_GCC>(*this, Target, Args);
break;
case llvm::Triple::GNU:
TC = llvm::make_unique<toolchains::MinGW>(*this, Target, Args);
break;
case llvm::Triple::Itanium:
TC = llvm::make_unique<toolchains::CrossWindowsToolChain>(*this, Target,
Args);
break;
case llvm::Triple::MSVC:
case llvm::Triple::UnknownEnvironment:
if (Args.getLastArgValue(options::OPT_fuse_ld_EQ)
.startswith_lower("bfd"))
TC = llvm::make_unique<toolchains::CrossWindowsToolChain>(
*this, Target, Args);
else
TC =
llvm::make_unique<toolchains::MSVCToolChain>(*this, Target, Args);
break;
}
break;
case llvm::Triple::PS4:
TC = llvm::make_unique<toolchains::PS4CPU>(*this, Target, Args);
break;
case llvm::Triple::Contiki:
TC = llvm::make_unique<toolchains::Contiki>(*this, Target, Args);
break;
default:
// Of these targets, Hexagon is the only one that might have
// an OS of Linux, in which case it got handled above already.
switch (Target.getArch()) {
case llvm::Triple::tce:
TC = llvm::make_unique<toolchains::TCEToolChain>(*this, Target, Args);
break;
case llvm::Triple::tcele:
TC = llvm::make_unique<toolchains::TCELEToolChain>(*this, Target, Args);
break;
case llvm::Triple::hexagon:
TC = llvm::make_unique<toolchains::HexagonToolChain>(*this, Target,
Args);
break;
case llvm::Triple::lanai:
TC = llvm::make_unique<toolchains::LanaiToolChain>(*this, Target, Args);
break;
case llvm::Triple::xcore:
TC = llvm::make_unique<toolchains::XCoreToolChain>(*this, Target, Args);
break;
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
TC = llvm::make_unique<toolchains::WebAssembly>(*this, Target, Args);
break;
case llvm::Triple::avr:
TC = llvm::make_unique<toolchains::AVRToolChain>(*this, Target, Args);
break;
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
TC = llvm::make_unique<toolchains::RISCVToolChain>(*this, Target, Args);
break;
default:
if (Target.getVendor() == llvm::Triple::Myriad)
TC = llvm::make_unique<toolchains::MyriadToolChain>(*this, Target,
Args);
else if (toolchains::BareMetal::handlesTarget(Target))
TC = llvm::make_unique<toolchains::BareMetal>(*this, Target, Args);
else if (Target.isOSBinFormatELF())
TC = llvm::make_unique<toolchains::Generic_ELF>(*this, Target, Args);
else if (Target.isOSBinFormatMachO())
TC = llvm::make_unique<toolchains::MachO>(*this, Target, Args);
else
TC = llvm::make_unique<toolchains::Generic_GCC>(*this, Target, Args);
}
}
}
// Intentionally omitted from the switch above: llvm::Triple::CUDA. CUDA
// compiles always need two toolchains, the CUDA toolchain and the host
// toolchain. So the only valid way to create a CUDA toolchain is via
// CreateOffloadingDeviceToolChains.
return *TC;
}
bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
// Say "no" if there is not exactly one input of a type clang understands.
if (JA.size() != 1 ||
!types::isAcceptedByClang((*JA.input_begin())->getType()))
return false;
// And say "no" if this is not a kind of action clang understands.
if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
!isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
return false;
return true;
}
/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and return the
/// grouped values as integers. Numbers which are not provided are set to 0.
///
/// \return True if the entire string was parsed (9.2), or all groups were
/// parsed (10.3.5extrastuff).
bool Driver::GetReleaseVersion(StringRef Str, unsigned &Major, unsigned &Minor,
unsigned &Micro, bool &HadExtra) {
HadExtra = false;
Major = Minor = Micro = 0;
if (Str.empty())
return false;
if (Str.consumeInteger(10, Major))
return false;
if (Str.empty())
return true;
if (Str[0] != '.')
return false;
Str = Str.drop_front(1);
if (Str.consumeInteger(10, Minor))
return false;
if (Str.empty())
return true;
if (Str[0] != '.')
return false;
Str = Str.drop_front(1);
if (Str.consumeInteger(10, Micro))
return false;
if (!Str.empty())
HadExtra = true;
return true;
}
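// Worked examples, read off the control flow above (not upstream code):
//   "4.9.2"            -> Major=4, Minor=9, Micro=2, returns true
//   "9.2"              -> Major=9, Minor=2, Micro=0, returns true
//   "10.3.5extrastuff" -> 10/3/5 with HadExtra=true, returns true
//   "9x"               -> returns false (non-'.' after the first integer)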
/// Parse digits from a string \p Str and fill \p Digits with
/// the parsed numbers. This method assumes that the maximum number of
/// digits to look for is Digits.size().
///
/// \return True if the entire string was parsed and there are
/// no extra characters remaining at the end.
bool Driver::GetReleaseVersion(StringRef Str,
MutableArrayRef<unsigned> Digits) {
if (Str.empty())
return false;
unsigned CurDigit = 0;
while (CurDigit < Digits.size()) {
unsigned Digit;
if (Str.consumeInteger(10, Digit))
return false;
Digits[CurDigit] = Digit;
if (Str.empty())
return true;
if (Str[0] != '.')
return false;
Str = Str.drop_front(1);
CurDigit++;
}
// More digits than requested, bail out...
return false;
}
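// Worked examples for the loop above: with Digits.size() == 2, "4.9"
// fills {4, 9} and returns true, while "4.9.2" stores 4 and 9 but exits
// the loop with input left over and returns false.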
std::pair<unsigned, unsigned> Driver::getIncludeExcludeOptionFlagMasks() const {
unsigned IncludedFlagsBitmask = 0;
unsigned ExcludedFlagsBitmask = options::NoDriverOption;
if (Mode == CLMode) {
// Include CL and Core options.
IncludedFlagsBitmask |= options::CLOption;
IncludedFlagsBitmask |= options::CoreOption;
} else {
ExcludedFlagsBitmask |= options::CLOption;
}
return std::make_pair(IncludedFlagsBitmask, ExcludedFlagsBitmask);
}
bool clang::driver::isOptimizationLevelFast(const ArgList &Args) {
return Args.hasFlag(options::OPT_Ofast, options::OPT_O_Group, false);
}
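// Behavior sketch: hasFlag honors the last -O-group option, so
// "clang -O2 -Ofast" counts as fast while "clang -Ofast -O2" does not,
// and with no -O option at all the default of false applies.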
Index: projects/clang700-import/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/lib/Driver/ToolChains/Gnu.cpp (revision 340125)
@@ -1,2562 +1,2562 @@
//===--- Gnu.cpp - Gnu Tool and ToolChain Implementations -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "Gnu.h"
#include "Arch/ARM.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
#include "Arch/Sparc.h"
#include "Arch/SystemZ.h"
#include "CommonArgs.h"
#include "Linux.h"
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TargetParser.h"
#include <system_error>
using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
void tools::GnuTool::anchor() {}
static bool forwardToGCC(const Option &O) {
// Don't forward inputs from the original command line. They are added from
// InputInfoList.
return O.getKind() != Option::InputClass &&
!O.hasFlag(options::DriverOption) && !O.hasFlag(options::LinkerInput);
}
// Switch CPU names not recognized by the GNU assembler to a close CPU that it
// does recognize, so that a lower march is not picked in the absence of a cpu
// flag.
static void normalizeCPUNamesForAssembler(const ArgList &Args,
ArgStringList &CmdArgs) {
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
StringRef CPUArg(A->getValue());
if (CPUArg.equals_lower("krait"))
CmdArgs.push_back("-mcpu=cortex-a15");
else if(CPUArg.equals_lower("kryo"))
CmdArgs.push_back("-mcpu=cortex-a57");
else
Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
}
}
void tools::gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
const Driver &D = getToolChain().getDriver();
ArgStringList CmdArgs;
for (const auto &A : Args) {
if (forwardToGCC(A->getOption())) {
// It is unfortunate that we have to claim here, as this means
// we will basically never report anything interesting for
// platforms using a generic gcc, even if we are just using gcc
// to get to the assembler.
A->claim();
// Don't forward any -g arguments to assembly steps.
if (isa<AssembleJobAction>(JA) &&
A->getOption().matches(options::OPT_g_Group))
continue;
// Don't forward any -W arguments to assembly and link steps.
if ((isa<AssembleJobAction>(JA) || isa<LinkJobAction>(JA)) &&
A->getOption().matches(options::OPT_W_Group))
continue;
// Don't forward -mno-unaligned-access since GCC doesn't understand
// it and because it doesn't affect the assembly or link steps.
if ((isa<AssembleJobAction>(JA) || isa<LinkJobAction>(JA)) &&
(A->getOption().matches(options::OPT_munaligned_access) ||
A->getOption().matches(options::OPT_mno_unaligned_access)))
continue;
A->render(Args, CmdArgs);
}
}
RenderExtraToolArgs(JA, CmdArgs);
// If using a driver driver, force the arch.
if (getToolChain().getTriple().isOSDarwin()) {
CmdArgs.push_back("-arch");
CmdArgs.push_back(
Args.MakeArgString(getToolChain().getDefaultUniversalArchName()));
}
// Try to force gcc to match the tool chain we want, if we recognize
// the arch.
//
// FIXME: The triple class should directly provide the information we want
// here.
switch (getToolChain().getArch()) {
default:
break;
case llvm::Triple::x86:
case llvm::Triple::ppc:
CmdArgs.push_back("-m32");
break;
case llvm::Triple::x86_64:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
CmdArgs.push_back("-m64");
break;
case llvm::Triple::sparcel:
CmdArgs.push_back("-EL");
break;
}
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
} else {
assert(Output.isNothing() && "Unexpected output");
CmdArgs.push_back("-fsyntax-only");
}
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
// Only pass -x if gcc will understand it; otherwise hope gcc
// understands the suffix correctly. The main case where this would go
// wrong is linker inputs that happen to have an odd suffix; really
// the only way to get that is a command like '-x foobar a.c', which
// will treat a.c like a linker input.
//
// FIXME: For the linker case specifically, can we safely convert
// inputs into '-Wl,' options?
for (const auto &II : Inputs) {
// Don't try to pass LLVM or AST inputs to a generic gcc.
if (types::isLLVMIR(II.getType()))
D.Diag(clang::diag::err_drv_no_linker_llvm_support)
<< getToolChain().getTripleString();
else if (II.getType() == types::TY_AST)
D.Diag(diag::err_drv_no_ast_support) << getToolChain().getTripleString();
else if (II.getType() == types::TY_ModuleFile)
D.Diag(diag::err_drv_no_module_support)
<< getToolChain().getTripleString();
if (types::canTypeBeUserSpecified(II.getType())) {
CmdArgs.push_back("-x");
CmdArgs.push_back(types::getTypeName(II.getType()));
}
if (II.isFilename())
CmdArgs.push_back(II.getFilename());
else {
const Arg &A = II.getInputArg();
// Reverse translate some rewritten options.
if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) {
CmdArgs.push_back("-lstdc++");
continue;
}
// Don't render as input; we need gcc to do the translations.
A.render(Args, CmdArgs);
}
}
const std::string &customGCCName = D.getCCCGenericGCCName();
const char *GCCName;
if (!customGCCName.empty())
GCCName = customGCCName.c_str();
else if (D.CCCIsCXX()) {
GCCName = "g++";
} else
GCCName = "gcc";
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void tools::gcc::Preprocessor::RenderExtraToolArgs(
const JobAction &JA, ArgStringList &CmdArgs) const {
CmdArgs.push_back("-E");
}
void tools::gcc::Compiler::RenderExtraToolArgs(const JobAction &JA,
ArgStringList &CmdArgs) const {
const Driver &D = getToolChain().getDriver();
switch (JA.getType()) {
// If -flto, etc. are present then make sure not to force assembly output.
case types::TY_LLVM_IR:
case types::TY_LTO_IR:
case types::TY_LLVM_BC:
case types::TY_LTO_BC:
CmdArgs.push_back("-c");
break;
// We assume we've got an "integrated" assembler in that gcc will produce an
// object file itself.
case types::TY_Object:
CmdArgs.push_back("-c");
break;
case types::TY_PP_Asm:
CmdArgs.push_back("-S");
break;
case types::TY_Nothing:
CmdArgs.push_back("-fsyntax-only");
break;
default:
D.Diag(diag::err_drv_invalid_gcc_output_type) << getTypeName(JA.getType());
}
}
void tools::gcc::Linker::RenderExtraToolArgs(const JobAction &JA,
ArgStringList &CmdArgs) const {
// The types are (hopefully) good enough.
}
static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
switch (T.getArch()) {
case llvm::Triple::x86:
if (T.isOSIAMCU())
return "elf_iamcu";
return "elf_i386";
case llvm::Triple::aarch64:
return "aarch64linux";
case llvm::Triple::aarch64_be:
- return "aarch64_be_linux";
+ return "aarch64linuxb";
case llvm::Triple::arm:
case llvm::Triple::thumb:
return "armelf_linux_eabi";
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
return "armelfb_linux_eabi";
case llvm::Triple::ppc:
return "elf32ppclinux";
case llvm::Triple::ppc64:
return "elf64ppc";
case llvm::Triple::ppc64le:
return "elf64lppc";
case llvm::Triple::riscv32:
return "elf32lriscv";
case llvm::Triple::riscv64:
return "elf64lriscv";
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
return "elf32_sparc";
case llvm::Triple::sparcv9:
return "elf64_sparc";
case llvm::Triple::mips:
return "elf32btsmip";
case llvm::Triple::mipsel:
return "elf32ltsmip";
case llvm::Triple::mips64:
if (tools::mips::hasMipsAbiArg(Args, "n32"))
return "elf32btsmipn32";
return "elf64btsmip";
case llvm::Triple::mips64el:
if (tools::mips::hasMipsAbiArg(Args, "n32"))
return "elf32ltsmipn32";
return "elf64ltsmip";
case llvm::Triple::systemz:
return "elf64_s390";
case llvm::Triple::x86_64:
if (T.getEnvironment() == llvm::Triple::GNUX32)
return "elf32_x86_64";
return "elf_x86_64";
default:
return nullptr;
}
}
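// Emulation examples, read off the switch above: x86_64 maps to
// "elf_x86_64" ("elf32_x86_64" under the GNUX32 environment), mips64 with
// -mabi=n32 maps to "elf32btsmipn32", and unknown architectures yield
// nullptr, which the caller turns into err_target_unknown_triple.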
static bool getPIE(const ArgList &Args, const toolchains::Linux &ToolChain) {
if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_static) ||
Args.hasArg(options::OPT_r))
return false;
Arg *A = Args.getLastArg(options::OPT_pie, options::OPT_no_pie,
options::OPT_nopie);
if (!A)
return ToolChain.isPIEDefault();
return A->getOption().matches(options::OPT_pie);
}
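// Decision sketch: -shared, -static and -r unconditionally disable PIE;
// otherwise the last of -pie/-no-pie/-nopie wins; with none of those
// present, the toolchain default (isPIEDefault) decides.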
void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
const toolchains::Linux &ToolChain =
static_cast<const toolchains::Linux &>(getToolChain());
const Driver &D = ToolChain.getDriver();
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool isAndroid = ToolChain.getTriple().isAndroid();
const bool IsIAMCU = ToolChain.getTriple().isOSIAMCU();
const bool IsPIE = getPIE(Args, ToolChain);
const bool HasCRTBeginEndFiles =
ToolChain.getTriple().hasEnvironment() ||
(ToolChain.getTriple().getVendor() != llvm::Triple::MipsTechnologies);
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
Args.ClaimAllArgs(options::OPT_g_Group);
// and "clang -emit-llvm foo.o -o foo"
Args.ClaimAllArgs(options::OPT_emit_llvm);
// and for "clang -w foo.o -o foo". Other warning options are already
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
if (llvm::sys::path::stem(Exec) == "lld") {
CmdArgs.push_back("-flavor");
CmdArgs.push_back("old-gnu");
CmdArgs.push_back("-target");
CmdArgs.push_back(Args.MakeArgString(getToolChain().getTripleString()));
}
if (!D.SysRoot.empty())
CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
if (IsPIE)
CmdArgs.push_back("-pie");
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
if (Args.hasArg(options::OPT_s))
CmdArgs.push_back("-s");
if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb)
arm::appendEBLinkFlags(Args, CmdArgs, Triple);
// Most Android ARM64 targets should enable the linker fix for erratum
// 843419. Only non-Cortex-A53 devices are allowed to skip this flag.
if (Arch == llvm::Triple::aarch64 && isAndroid) {
std::string CPU = getCPUName(Args, Triple);
if (CPU.empty() || CPU == "generic" || CPU == "cortex-a53")
CmdArgs.push_back("--fix-cortex-a53-843419");
}
for (const auto &Opt : ToolChain.ExtraOpts)
CmdArgs.push_back(Opt.c_str());
CmdArgs.push_back("--eh-frame-hdr");
if (const char *LDMOption = getLDMOption(ToolChain.getTriple(), Args)) {
CmdArgs.push_back("-m");
CmdArgs.push_back(LDMOption);
} else {
D.Diag(diag::err_target_unknown_triple) << Triple.str();
return;
}
if (Args.hasArg(options::OPT_static)) {
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb)
CmdArgs.push_back("-Bstatic");
else
CmdArgs.push_back("-static");
} else if (Args.hasArg(options::OPT_shared)) {
CmdArgs.push_back("-shared");
}
if (!Args.hasArg(options::OPT_static)) {
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
if (!Args.hasArg(options::OPT_shared)) {
const std::string Loader =
D.DyldPrefix + ToolChain.getDynamicLinker(Args);
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back(Args.MakeArgString(Loader));
}
}
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
if (!isAndroid && !IsIAMCU) {
const char *crt1 = nullptr;
if (!Args.hasArg(options::OPT_shared)) {
if (Args.hasArg(options::OPT_pg))
crt1 = "gcrt1.o";
else if (IsPIE)
crt1 = "Scrt1.o";
else
crt1 = "crt1.o";
}
if (crt1)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
}
if (IsIAMCU)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
else {
const char *crtbegin;
if (Args.hasArg(options::OPT_static))
crtbegin = isAndroid ? "crtbegin_static.o" : "crtbeginT.o";
else if (Args.hasArg(options::OPT_shared))
crtbegin = isAndroid ? "crtbegin_so.o" : "crtbeginS.o";
else if (IsPIE)
crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbeginS.o";
else
crtbegin = isAndroid ? "crtbegin_dynamic.o" : "crtbegin.o";
if (HasCRTBeginEndFiles)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
}
// Add crtfastmath.o if available and fast math is enabled.
ToolChain.AddFastMathRuntimeIfAvailable(Args, CmdArgs);
}
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_u);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO()) {
assert(!Inputs.empty() && "Must have at least one input.");
AddGoldPlugin(ToolChain, Args, CmdArgs, Output, Inputs[0],
D.getLTOMode() == LTOK_Thin);
}
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
// The profile runtime also needs access to system libraries.
getToolChain().addProfileRTLibs(Args, CmdArgs);
if (D.CCCIsCXX() &&
!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (ToolChain.ShouldLinkCXXStdlib(Args)) {
bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
!Args.hasArg(options::OPT_static);
if (OnlyLibstdcxxStatic)
CmdArgs.push_back("-Bstatic");
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
if (OnlyLibstdcxxStatic)
CmdArgs.push_back("-Bdynamic");
}
CmdArgs.push_back("-lm");
}
// Silence warnings when linking C code with a C++ '-stdlib' argument.
Args.ClaimAllArgs(options::OPT_stdlib_EQ);
if (!Args.hasArg(options::OPT_nostdlib)) {
if (!Args.hasArg(options::OPT_nodefaultlibs)) {
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("--start-group");
if (NeedsSanitizerDeps)
linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
if (NeedsXRayDeps)
linkXRayRuntimeDeps(ToolChain, CmdArgs);
bool WantPthread = Args.hasArg(options::OPT_pthread) ||
Args.hasArg(options::OPT_pthreads);
// FIXME: Only pass GompNeedsRT = true for platforms with libgomp that
// require librt. Most modern Linux platforms do, but some may not.
if (addOpenMPRuntime(CmdArgs, ToolChain, Args,
JA.isHostOffloading(Action::OFK_OpenMP),
/* GompNeedsRT= */ true))
// The OpenMP runtime implies pthreads when using the GNU toolchain.
// FIXME: Does this really make sense for all GNU toolchains?
WantPthread = true;
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
if (WantPthread && !isAndroid)
CmdArgs.push_back("-lpthread");
if (Args.hasArg(options::OPT_fsplit_stack))
CmdArgs.push_back("--wrap=pthread_create");
CmdArgs.push_back("-lc");
// Add IAMCU specific libs, if needed.
if (IsIAMCU)
CmdArgs.push_back("-lgloss");
if (Args.hasArg(options::OPT_static))
CmdArgs.push_back("--end-group");
else
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
// Add IAMCU specific libs (outside the group), if needed.
if (IsIAMCU) {
CmdArgs.push_back("--as-needed");
CmdArgs.push_back("-lsoftfp");
CmdArgs.push_back("--no-as-needed");
}
}
if (!Args.hasArg(options::OPT_nostartfiles) && !IsIAMCU) {
const char *crtend;
if (Args.hasArg(options::OPT_shared))
crtend = isAndroid ? "crtend_so.o" : "crtendS.o";
else if (IsPIE)
crtend = isAndroid ? "crtend_android.o" : "crtendS.o";
else
crtend = isAndroid ? "crtend_android.o" : "crtend.o";
if (HasCRTBeginEndFiles)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
if (!isAndroid)
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
}
}
// Add OpenMP offloading linker script args if required.
AddOpenMPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA);
// Add HIP offloading linker script args if required.
AddHIPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA,
*this);
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
void tools::gnutools::Assembler::ConstructJob(Compilation &C,
const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
const auto &D = getToolChain().getDriver();
claimNoWarnArgs(Args);
ArgStringList CmdArgs;
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
std::tie(RelocationModel, PICLevel, IsPIE) =
ParsePICArgs(getToolChain(), Args);
if (const Arg *A = Args.getLastArg(options::OPT_gz, options::OPT_gz_EQ)) {
if (A->getOption().getID() == options::OPT_gz) {
CmdArgs.push_back("-compress-debug-sections");
} else {
StringRef Value = A->getValue();
if (Value == "none") {
CmdArgs.push_back("-compress-debug-sections=none");
} else if (Value == "zlib" || Value == "zlib-gnu") {
CmdArgs.push_back(
Args.MakeArgString("-compress-debug-sections=" + Twine(Value)));
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
}
}
}
switch (getToolChain().getArch()) {
default:
break;
// Add --32/--64 to make sure we get the format we want.
// This is incomplete.
case llvm::Triple::x86:
CmdArgs.push_back("--32");
break;
case llvm::Triple::x86_64:
if (getToolChain().getTriple().getEnvironment() == llvm::Triple::GNUX32)
CmdArgs.push_back("--x32");
else
CmdArgs.push_back("--64");
break;
case llvm::Triple::ppc: {
CmdArgs.push_back("-a32");
CmdArgs.push_back("-mppc");
CmdArgs.push_back(
ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
break;
}
case llvm::Triple::ppc64: {
CmdArgs.push_back("-a64");
CmdArgs.push_back("-mppc64");
CmdArgs.push_back(
ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
break;
}
case llvm::Triple::ppc64le: {
CmdArgs.push_back("-a64");
CmdArgs.push_back("-mppc64");
CmdArgs.push_back("-mlittle-endian");
CmdArgs.push_back(
ppc::getPPCAsmModeForCPU(getCPUName(Args, getToolChain().getTriple())));
break;
}
case llvm::Triple::riscv32:
case llvm::Triple::riscv64: {
StringRef ABIName = riscv::getRISCVABI(Args, getToolChain().getTriple());
CmdArgs.push_back("-mabi");
CmdArgs.push_back(ABIName.data());
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
StringRef MArch = A->getValue();
CmdArgs.push_back("-march");
CmdArgs.push_back(MArch.data());
}
break;
}
case llvm::Triple::sparc:
case llvm::Triple::sparcel: {
CmdArgs.push_back("-32");
std::string CPU = getCPUName(Args, getToolChain().getTriple());
CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
case llvm::Triple::sparcv9: {
CmdArgs.push_back("-64");
std::string CPU = getCPUName(Args, getToolChain().getTriple());
CmdArgs.push_back(sparc::getSparcAsmModeForCPU(CPU, getToolChain().getTriple()));
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
const llvm::Triple &Triple2 = getToolChain().getTriple();
switch (Triple2.getSubArch()) {
case llvm::Triple::ARMSubArch_v7:
CmdArgs.push_back("-mfpu=neon");
break;
case llvm::Triple::ARMSubArch_v8:
CmdArgs.push_back("-mfpu=crypto-neon-fp-armv8");
break;
default:
break;
}
switch (arm::getARMFloatABI(getToolChain(), Args)) {
case arm::FloatABI::Invalid: llvm_unreachable("must have an ABI!");
case arm::FloatABI::Soft:
CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=soft"));
break;
case arm::FloatABI::SoftFP:
CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=softfp"));
break;
case arm::FloatABI::Hard:
CmdArgs.push_back(Args.MakeArgString("-mfloat-abi=hard"));
break;
}
Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
normalizeCPUNamesForAssembler(Args, CmdArgs);
Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
break;
}
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be: {
Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
normalizeCPUNamesForAssembler(Args, CmdArgs);
break;
}
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
case llvm::Triple::mips64el: {
StringRef CPUName;
StringRef ABIName;
mips::getMipsCPUAndABI(Args, getToolChain().getTriple(), CPUName, ABIName);
ABIName = mips::getGnuCompatibleMipsABIName(ABIName);
CmdArgs.push_back("-march");
CmdArgs.push_back(CPUName.data());
CmdArgs.push_back("-mabi");
CmdArgs.push_back(ABIName.data());
// -mno-shared should be emitted unless -fpic, -fpie, -fPIC, -fPIE,
// or -mshared (not implemented) is in effect.
if (RelocationModel == llvm::Reloc::Static)
CmdArgs.push_back("-mno-shared");
// LLVM doesn't support -mplt yet and acts as if it is always given.
// However, -mplt has no effect with the N64 ABI.
if (ABIName != "64" && !Args.hasArg(options::OPT_mno_abicalls))
CmdArgs.push_back("-call_nonpic");
if (getToolChain().getTriple().isLittleEndian())
CmdArgs.push_back("-EL");
else
CmdArgs.push_back("-EB");
if (Arg *A = Args.getLastArg(options::OPT_mnan_EQ)) {
if (StringRef(A->getValue()) == "2008")
CmdArgs.push_back(Args.MakeArgString("-mnan=2008"));
}
// Add the last -mfp32/-mfpxx/-mfp64 or -mfpxx if it is enabled by default.
if (Arg *A = Args.getLastArg(options::OPT_mfp32, options::OPT_mfpxx,
options::OPT_mfp64)) {
A->claim();
A->render(Args, CmdArgs);
} else if (mips::shouldUseFPXX(
Args, getToolChain().getTriple(), CPUName, ABIName,
mips::getMipsFloatABI(getToolChain().getDriver(), Args)))
CmdArgs.push_back("-mfpxx");
// Pass on -mmips16 or -mno-mips16. However, the assembler equivalent of
// -mno-mips16 is actually -no-mips16.
if (Arg *A =
Args.getLastArg(options::OPT_mips16, options::OPT_mno_mips16)) {
if (A->getOption().matches(options::OPT_mips16)) {
A->claim();
A->render(Args, CmdArgs);
} else {
A->claim();
CmdArgs.push_back("-no-mips16");
}
}
Args.AddLastArg(CmdArgs, options::OPT_mmicromips,
options::OPT_mno_micromips);
Args.AddLastArg(CmdArgs, options::OPT_mdsp, options::OPT_mno_dsp);
Args.AddLastArg(CmdArgs, options::OPT_mdspr2, options::OPT_mno_dspr2);
if (Arg *A = Args.getLastArg(options::OPT_mmsa, options::OPT_mno_msa)) {
// Do not use AddLastArg because not all versions of MIPS assembler
// support -mmsa / -mno-msa options.
if (A->getOption().matches(options::OPT_mmsa))
CmdArgs.push_back(Args.MakeArgString("-mmsa"));
}
Args.AddLastArg(CmdArgs, options::OPT_mhard_float,
options::OPT_msoft_float);
Args.AddLastArg(CmdArgs, options::OPT_mdouble_float,
options::OPT_msingle_float);
Args.AddLastArg(CmdArgs, options::OPT_modd_spreg,
options::OPT_mno_odd_spreg);
AddAssemblerKPIC(getToolChain(), Args, CmdArgs);
break;
}
case llvm::Triple::systemz: {
// Always pass an -march option, since our default of z10 is later
// than the GNU assembler's default.
StringRef CPUName = systemz::getSystemZTargetCPU(Args);
CmdArgs.push_back(Args.MakeArgString("-march=" + CPUName));
break;
}
}
Args.AddAllArgs(CmdArgs, options::OPT_I);
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
for (const auto &II : Inputs)
CmdArgs.push_back(II.getFilename());
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("as"));
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
// Handle the debug info splitting at object creation time if we're
// creating an object.
// TODO: Currently only works on linux with newer objcopy.
if (Args.hasArg(options::OPT_gsplit_dwarf) &&
getToolChain().getTriple().isOSLinux())
SplitDebugInfo(getToolChain(), C, *this, JA, Args, Output,
SplitDebugName(Args, Inputs[0]));
}
namespace {
// Filter to remove Multilibs that don't exist as a suffix to Path
class FilterNonExistent {
StringRef Base, File;
vfs::FileSystem &VFS;
public:
FilterNonExistent(StringRef Base, StringRef File, vfs::FileSystem &VFS)
: Base(Base), File(File), VFS(VFS) {}
bool operator()(const Multilib &M) {
return !VFS.exists(Base + M.gccSuffix() + File);
}
};
} // end anonymous namespace
static bool isSoftFloatABI(const ArgList &Args) {
Arg *A = Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
options::OPT_mfloat_abi_EQ);
if (!A)
return false;
return A->getOption().matches(options::OPT_msoft_float) ||
(A->getOption().matches(options::OPT_mfloat_abi_EQ) &&
A->getValue() == StringRef("soft"));
}
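// Examples for the predicate above: -msoft-float and -mfloat-abi=soft
// both yield true; -mhard-float and -mfloat-abi=hard yield false, as does
// the absence of any of these options.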
/// \p Flag must be a flag accepted by the driver with its leading '-' removed,
/// otherwise '-print-multi-lib' will not emit them correctly.
static void addMultilibFlag(bool Enabled, const char *const Flag,
std::vector<std::string> &Flags) {
if (Enabled)
Flags.push_back(std::string("+") + Flag);
else
Flags.push_back(std::string("-") + Flag);
}
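// Usage sketch: addMultilibFlag(true, "m32", Flags) appends "+m32" while
// addMultilibFlag(false, "m32", Flags) appends "-m32"; multilib selection
// later matches these strings against each Multilib's flag list.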
static bool isArmOrThumbArch(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb;
}
static bool isMipsEL(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::mipsel || Arch == llvm::Triple::mips64el;
}
static bool isMips16(const ArgList &Args) {
Arg *A = Args.getLastArg(options::OPT_mips16, options::OPT_mno_mips16);
return A && A->getOption().matches(options::OPT_mips16);
}
static bool isMicroMips(const ArgList &Args) {
Arg *A = Args.getLastArg(options::OPT_mmicromips, options::OPT_mno_micromips);
return A && A->getOption().matches(options::OPT_mmicromips);
}
static bool isRISCV(llvm::Triple::ArchType Arch) {
return Arch == llvm::Triple::riscv32 || Arch == llvm::Triple::riscv64;
}
static Multilib makeMultilib(StringRef commonSuffix) {
return Multilib(commonSuffix, commonSuffix, commonSuffix);
}
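// Usage sketch: makeMultilib("/mips16") yields a Multilib whose GCC, OS
// and include suffixes are all "/mips16"; the builder chains below then
// override individual suffixes or attach +/- selection flags.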
static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
// Check for Code Sourcery toolchain multilibs
MultilibSet CSMipsMultilibs;
{
auto MArchMips16 = makeMultilib("/mips16").flag("+m32").flag("+mips16");
auto MArchMicroMips =
makeMultilib("/micromips").flag("+m32").flag("+mmicromips");
auto MArchDefault = makeMultilib("").flag("-mips16").flag("-mmicromips");
auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
auto SoftFloat = makeMultilib("/soft-float").flag("+msoft-float");
auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
auto DefaultFloat =
makeMultilib("").flag("-msoft-float").flag("-mnan=2008");
auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
// Note that this one's osSuffix is ""
auto MAbi64 = makeMultilib("")
.gccSuffix("/64")
.includeSuffix("/64")
.flag("+mabi=n64")
.flag("-mabi=n32")
.flag("-m32");
CSMipsMultilibs =
MultilibSet()
.Either(MArchMips16, MArchMicroMips, MArchDefault)
.Maybe(UCLibc)
.Either(SoftFloat, Nan2008, DefaultFloat)
.FilterOut("/micromips/nan2008")
.FilterOut("/mips16/nan2008")
.Either(BigEndian, LittleEndian)
.Maybe(MAbi64)
.FilterOut("/mips16.*/64")
.FilterOut("/micromips.*/64")
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
std::vector<std::string> Dirs({"/include"});
if (StringRef(M.includeSuffix()).startswith("/uclibc"))
Dirs.push_back(
"/../../../../mips-linux-gnu/libc/uclibc/usr/include");
else
Dirs.push_back("/../../../../mips-linux-gnu/libc/usr/include");
return Dirs;
});
}
MultilibSet DebianMipsMultilibs;
{
Multilib MAbiN32 =
Multilib().gccSuffix("/n32").includeSuffix("/n32").flag("+mabi=n32");
Multilib M64 = Multilib()
.gccSuffix("/64")
.includeSuffix("/64")
.flag("+m64")
.flag("-m32")
.flag("-mabi=n32");
Multilib M32 = Multilib().flag("-m64").flag("+m32").flag("-mabi=n32");
DebianMipsMultilibs =
MultilibSet().Either(M32, M64, MAbiN32).FilterOut(NonExistent);
}
// Sort candidates. The toolchain that best matches the directory tree goes
// first. Then select the first toolchain that matches the command-line flags.
MultilibSet *Candidates[] = {&CSMipsMultilibs, &DebianMipsMultilibs};
if (CSMipsMultilibs.size() < DebianMipsMultilibs.size())
std::iter_swap(Candidates, Candidates + 1);
for (const MultilibSet *Candidate : Candidates) {
if (Candidate->select(Flags, Result.SelectedMultilib)) {
if (Candidate == &DebianMipsMultilibs)
Result.BiarchSibling = Multilib();
Result.Multilibs = *Candidate;
return true;
}
}
return false;
}
static bool findMipsAndroidMultilibs(vfs::FileSystem &VFS, StringRef Path,
const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
MultilibSet AndroidMipsMultilibs =
MultilibSet()
.Maybe(Multilib("/mips-r2").flag("+march=mips32r2"))
.Maybe(Multilib("/mips-r6").flag("+march=mips32r6"))
.FilterOut(NonExistent);
MultilibSet AndroidMipselMultilibs =
MultilibSet()
.Either(Multilib().flag("+march=mips32"),
Multilib("/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
Multilib("/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
.FilterOut(NonExistent);
MultilibSet AndroidMips64elMultilibs =
MultilibSet()
.Either(
Multilib().flag("+march=mips64r6"),
Multilib("/32/mips-r1", "", "/mips-r1").flag("+march=mips32"),
Multilib("/32/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
Multilib("/32/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
.FilterOut(NonExistent);
MultilibSet *MS = &AndroidMipsMultilibs;
if (VFS.exists(Path + "/mips-r6"))
MS = &AndroidMipselMultilibs;
else if (VFS.exists(Path + "/32"))
MS = &AndroidMips64elMultilibs;
if (MS->select(Flags, Result.SelectedMultilib)) {
Result.Multilibs = *MS;
return true;
}
return false;
}
static bool findMipsMuslMultilibs(const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
// Musl toolchain multilibs
MultilibSet MuslMipsMultilibs;
{
auto MArchMipsR2 = makeMultilib("")
.osSuffix("/mips-r2-hard-musl")
.flag("+EB")
.flag("-EL")
.flag("+march=mips32r2");
auto MArchMipselR2 = makeMultilib("/mipsel-r2-hard-musl")
.flag("-EB")
.flag("+EL")
.flag("+march=mips32r2");
MuslMipsMultilibs = MultilibSet().Either(MArchMipsR2, MArchMipselR2);
// Specify the callback that computes the include directories.
MuslMipsMultilibs.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>(
{"/../sysroot" + M.osSuffix() + "/usr/include"});
});
}
if (MuslMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
Result.Multilibs = MuslMipsMultilibs;
return true;
}
return false;
}
static bool findMipsMtiMultilibs(const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
// CodeScape MTI toolchain v1.2 and earlier.
MultilibSet MtiMipsMultilibsV1;
{
auto MArchMips32 = makeMultilib("/mips32")
.flag("+m32")
.flag("-m64")
.flag("-mmicromips")
.flag("+march=mips32");
auto MArchMicroMips = makeMultilib("/micromips")
.flag("+m32")
.flag("-m64")
.flag("+mmicromips");
auto MArchMips64r2 = makeMultilib("/mips64r2")
.flag("-m32")
.flag("+m64")
.flag("+march=mips64r2");
auto MArchMips64 = makeMultilib("/mips64").flag("-m32").flag("+m64").flag(
"-march=mips64r2");
auto MArchDefault = makeMultilib("")
.flag("+m32")
.flag("-m64")
.flag("-mmicromips")
.flag("+march=mips32r2");
auto Mips16 = makeMultilib("/mips16").flag("+mips16");
auto UCLibc = makeMultilib("/uclibc").flag("+muclibc");
auto MAbi64 =
makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
auto BigEndian = makeMultilib("").flag("+EB").flag("-EL");
auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
auto SoftFloat = makeMultilib("/sof").flag("+msoft-float");
auto Nan2008 = makeMultilib("/nan2008").flag("+mnan=2008");
MtiMipsMultilibsV1 =
MultilibSet()
.Either(MArchMips32, MArchMicroMips, MArchMips64r2, MArchMips64,
MArchDefault)
.Maybe(UCLibc)
.Maybe(Mips16)
.FilterOut("/mips64/mips16")
.FilterOut("/mips64r2/mips16")
.FilterOut("/micromips/mips16")
.Maybe(MAbi64)
.FilterOut("/micromips/64")
.FilterOut("/mips32/64")
.FilterOut("^/64")
.FilterOut("/mips16/64")
.Either(BigEndian, LittleEndian)
.Maybe(SoftFloat)
.Maybe(Nan2008)
.FilterOut(".*sof/nan2008")
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
std::vector<std::string> Dirs({"/include"});
if (StringRef(M.includeSuffix()).startswith("/uclibc"))
Dirs.push_back("/../../../../sysroot/uclibc/usr/include");
else
Dirs.push_back("/../../../../sysroot/usr/include");
return Dirs;
});
}
// CodeScape MTI toolchain starting from v1.3.
MultilibSet MtiMipsMultilibsV2;
{
auto BeHard = makeMultilib("/mips-r2-hard")
.flag("+EB")
.flag("-msoft-float")
.flag("-mnan=2008")
.flag("-muclibc");
auto BeSoft = makeMultilib("/mips-r2-soft")
.flag("+EB")
.flag("+msoft-float")
.flag("-mnan=2008");
auto ElHard = makeMultilib("/mipsel-r2-hard")
.flag("+EL")
.flag("-msoft-float")
.flag("-mnan=2008")
.flag("-muclibc");
auto ElSoft = makeMultilib("/mipsel-r2-soft")
.flag("+EL")
.flag("+msoft-float")
.flag("-mnan=2008")
.flag("-mmicromips");
auto BeHardNan = makeMultilib("/mips-r2-hard-nan2008")
.flag("+EB")
.flag("-msoft-float")
.flag("+mnan=2008")
.flag("-muclibc");
auto ElHardNan = makeMultilib("/mipsel-r2-hard-nan2008")
.flag("+EL")
.flag("-msoft-float")
.flag("+mnan=2008")
.flag("-muclibc")
.flag("-mmicromips");
auto BeHardNanUclibc = makeMultilib("/mips-r2-hard-nan2008-uclibc")
.flag("+EB")
.flag("-msoft-float")
.flag("+mnan=2008")
.flag("+muclibc");
auto ElHardNanUclibc = makeMultilib("/mipsel-r2-hard-nan2008-uclibc")
.flag("+EL")
.flag("-msoft-float")
.flag("+mnan=2008")
.flag("+muclibc");
auto BeHardUclibc = makeMultilib("/mips-r2-hard-uclibc")
.flag("+EB")
.flag("-msoft-float")
.flag("-mnan=2008")
.flag("+muclibc");
auto ElHardUclibc = makeMultilib("/mipsel-r2-hard-uclibc")
.flag("+EL")
.flag("-msoft-float")
.flag("-mnan=2008")
.flag("+muclibc");
auto ElMicroHardNan = makeMultilib("/micromipsel-r2-hard-nan2008")
.flag("+EL")
.flag("-msoft-float")
.flag("+mnan=2008")
.flag("+mmicromips");
auto ElMicroSoft = makeMultilib("/micromipsel-r2-soft")
.flag("+EL")
.flag("+msoft-float")
.flag("-mnan=2008")
.flag("+mmicromips");
auto O32 =
makeMultilib("/lib").osSuffix("").flag("-mabi=n32").flag("-mabi=n64");
auto N32 =
makeMultilib("/lib32").osSuffix("").flag("+mabi=n32").flag("-mabi=n64");
auto N64 =
makeMultilib("/lib64").osSuffix("").flag("-mabi=n32").flag("+mabi=n64");
MtiMipsMultilibsV2 =
MultilibSet()
.Either({BeHard, BeSoft, ElHard, ElSoft, BeHardNan, ElHardNan,
BeHardNanUclibc, ElHardNanUclibc, BeHardUclibc,
ElHardUclibc, ElMicroHardNan, ElMicroSoft})
.Either(O32, N32, N64)
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>({"/../../../../sysroot" +
M.includeSuffix() +
"/../usr/include"});
})
.setFilePathsCallback([](const Multilib &M) {
return std::vector<std::string>(
{"/../../../../mips-mti-linux-gnu/lib" + M.gccSuffix()});
});
}
for (auto Candidate : {&MtiMipsMultilibsV1, &MtiMipsMultilibsV2}) {
if (Candidate->select(Flags, Result.SelectedMultilib)) {
Result.Multilibs = *Candidate;
return true;
}
}
return false;
}
static bool findMipsImgMultilibs(const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
// CodeScape IMG toolchain v1.2 and earlier.
MultilibSet ImgMultilibsV1;
{
auto Mips64r6 = makeMultilib("/mips64r6").flag("+m64").flag("-m32");
auto LittleEndian = makeMultilib("/el").flag("+EL").flag("-EB");
auto MAbi64 =
makeMultilib("/64").flag("+mabi=n64").flag("-mabi=n32").flag("-m32");
ImgMultilibsV1 =
MultilibSet()
.Maybe(Mips64r6)
.Maybe(MAbi64)
.Maybe(LittleEndian)
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>(
{"/include", "/../../../../sysroot/usr/include"});
});
}
// CodeScape IMG toolchain starting from v1.3.
MultilibSet ImgMultilibsV2;
{
auto BeHard = makeMultilib("/mips-r6-hard")
.flag("+EB")
.flag("-msoft-float")
.flag("-mmicromips");
auto BeSoft = makeMultilib("/mips-r6-soft")
.flag("+EB")
.flag("+msoft-float")
.flag("-mmicromips");
auto ElHard = makeMultilib("/mipsel-r6-hard")
.flag("+EL")
.flag("-msoft-float")
.flag("-mmicromips");
auto ElSoft = makeMultilib("/mipsel-r6-soft")
.flag("+EL")
.flag("+msoft-float")
.flag("-mmicromips");
auto BeMicroHard = makeMultilib("/micromips-r6-hard")
.flag("+EB")
.flag("-msoft-float")
.flag("+mmicromips");
auto BeMicroSoft = makeMultilib("/micromips-r6-soft")
.flag("+EB")
.flag("+msoft-float")
.flag("+mmicromips");
auto ElMicroHard = makeMultilib("/micromipsel-r6-hard")
.flag("+EL")
.flag("-msoft-float")
.flag("+mmicromips");
auto ElMicroSoft = makeMultilib("/micromipsel-r6-soft")
.flag("+EL")
.flag("+msoft-float")
.flag("+mmicromips");
auto O32 =
makeMultilib("/lib").osSuffix("").flag("-mabi=n32").flag("-mabi=n64");
auto N32 =
makeMultilib("/lib32").osSuffix("").flag("+mabi=n32").flag("-mabi=n64");
auto N64 =
makeMultilib("/lib64").osSuffix("").flag("-mabi=n32").flag("+mabi=n64");
ImgMultilibsV2 =
MultilibSet()
.Either({BeHard, BeSoft, ElHard, ElSoft, BeMicroHard, BeMicroSoft,
ElMicroHard, ElMicroSoft})
.Either(O32, N32, N64)
.FilterOut(NonExistent)
.setIncludeDirsCallback([](const Multilib &M) {
return std::vector<std::string>({"/../../../../sysroot" +
M.includeSuffix() +
"/../usr/include"});
})
.setFilePathsCallback([](const Multilib &M) {
return std::vector<std::string>(
{"/../../../../mips-img-linux-gnu/lib" + M.gccSuffix()});
});
}
for (auto Candidate : {&ImgMultilibsV1, &ImgMultilibsV2}) {
if (Candidate->select(Flags, Result.SelectedMultilib)) {
Result.Multilibs = *Candidate;
return true;
}
}
return false;
}
bool clang::driver::findMIPSMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef Path, const ArgList &Args,
DetectedMultilibs &Result) {
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
StringRef CPUName;
StringRef ABIName;
tools::mips::getMipsCPUAndABI(Args, TargetTriple, CPUName, ABIName);
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
Multilib::flags_list Flags;
addMultilibFlag(TargetTriple.isMIPS32(), "m32", Flags);
addMultilibFlag(TargetTriple.isMIPS64(), "m64", Flags);
addMultilibFlag(isMips16(Args), "mips16", Flags);
addMultilibFlag(CPUName == "mips32", "march=mips32", Flags);
addMultilibFlag(CPUName == "mips32r2" || CPUName == "mips32r3" ||
CPUName == "mips32r5" || CPUName == "p5600",
"march=mips32r2", Flags);
addMultilibFlag(CPUName == "mips32r6", "march=mips32r6", Flags);
addMultilibFlag(CPUName == "mips64", "march=mips64", Flags);
addMultilibFlag(CPUName == "mips64r2" || CPUName == "mips64r3" ||
CPUName == "mips64r5" || CPUName == "octeon",
"march=mips64r2", Flags);
addMultilibFlag(CPUName == "mips64r6", "march=mips64r6", Flags);
addMultilibFlag(isMicroMips(Args), "mmicromips", Flags);
addMultilibFlag(tools::mips::isUCLibc(Args), "muclibc", Flags);
addMultilibFlag(tools::mips::isNaN2008(Args, TargetTriple), "mnan=2008",
Flags);
addMultilibFlag(ABIName == "n32", "mabi=n32", Flags);
addMultilibFlag(ABIName == "n64", "mabi=n64", Flags);
addMultilibFlag(isSoftFloatABI(Args), "msoft-float", Flags);
addMultilibFlag(!isSoftFloatABI(Args), "mhard-float", Flags);
addMultilibFlag(isMipsEL(TargetArch), "EL", Flags);
addMultilibFlag(!isMipsEL(TargetArch), "EB", Flags);
if (TargetTriple.isAndroid())
return findMipsAndroidMultilibs(D.getVFS(), Path, Flags, NonExistent,
Result);
if (TargetTriple.getVendor() == llvm::Triple::MipsTechnologies &&
TargetTriple.getOS() == llvm::Triple::Linux &&
TargetTriple.getEnvironment() == llvm::Triple::UnknownEnvironment)
return findMipsMuslMultilibs(Flags, NonExistent, Result);
if (TargetTriple.getVendor() == llvm::Triple::MipsTechnologies &&
TargetTriple.getOS() == llvm::Triple::Linux &&
TargetTriple.isGNUEnvironment())
return findMipsMtiMultilibs(Flags, NonExistent, Result);
if (TargetTriple.getVendor() == llvm::Triple::ImaginationTechnologies &&
TargetTriple.getOS() == llvm::Triple::Linux &&
TargetTriple.isGNUEnvironment())
return findMipsImgMultilibs(Flags, NonExistent, Result);
if (findMipsCsMultilibs(Flags, NonExistent, Result))
return true;
// Fall back to the regular toolchain-tree structure.
Multilib Default;
Result.Multilibs.push_back(Default);
Result.Multilibs.FilterOut(NonExistent);
if (Result.Multilibs.select(Flags, Result.SelectedMultilib)) {
Result.BiarchSibling = Multilib();
return true;
}
return false;
}
static void findAndroidArmMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef Path, const ArgList &Args,
DetectedMultilibs &Result) {
// Find multilibs with subdirectories like armv7-a, thumb, armv7-a/thumb.
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
Multilib ArmV7Multilib = makeMultilib("/armv7-a")
.flag("+march=armv7-a")
.flag("-mthumb");
Multilib ThumbMultilib = makeMultilib("/thumb")
.flag("-march=armv7-a")
.flag("+mthumb");
Multilib ArmV7ThumbMultilib = makeMultilib("/armv7-a/thumb")
.flag("+march=armv7-a")
.flag("+mthumb");
Multilib DefaultMultilib = makeMultilib("")
.flag("-march=armv7-a")
.flag("-mthumb");
MultilibSet AndroidArmMultilibs =
MultilibSet()
.Either(ThumbMultilib, ArmV7Multilib,
ArmV7ThumbMultilib, DefaultMultilib)
.FilterOut(NonExistent);
Multilib::flags_list Flags;
llvm::StringRef Arch = Args.getLastArgValue(options::OPT_march_EQ);
bool IsArmArch = TargetTriple.getArch() == llvm::Triple::arm;
bool IsThumbArch = TargetTriple.getArch() == llvm::Triple::thumb;
bool IsV7SubArch = TargetTriple.getSubArch() == llvm::Triple::ARMSubArch_v7;
bool IsThumbMode = IsThumbArch ||
Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb, false) ||
(IsArmArch && llvm::ARM::parseArchISA(Arch) == llvm::ARM::ISAKind::THUMB);
bool IsArmV7Mode = (IsArmArch || IsThumbArch) &&
(llvm::ARM::parseArchVersion(Arch) == 7 ||
(IsArmArch && Arch == "" && IsV7SubArch));
addMultilibFlag(IsArmV7Mode, "march=armv7-a", Flags);
addMultilibFlag(IsThumbMode, "mthumb", Flags);
if (AndroidArmMultilibs.select(Flags, Result.SelectedMultilib))
Result.Multilibs = AndroidArmMultilibs;
}
static void findRISCVMultilibs(const Driver &D,
const llvm::Triple &TargetTriple, StringRef Path,
const ArgList &Args, DetectedMultilibs &Result) {
FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
Multilib Ilp32 = makeMultilib("lib32/ilp32").flag("+m32").flag("+mabi=ilp32");
Multilib Ilp32f =
makeMultilib("lib32/ilp32f").flag("+m32").flag("+mabi=ilp32f");
Multilib Ilp32d =
makeMultilib("lib32/ilp32d").flag("+m32").flag("+mabi=ilp32d");
Multilib Lp64 = makeMultilib("lib64/lp64").flag("+m64").flag("+mabi=lp64");
Multilib Lp64f = makeMultilib("lib64/lp64f").flag("+m64").flag("+mabi=lp64f");
Multilib Lp64d = makeMultilib("lib64/lp64d").flag("+m64").flag("+mabi=lp64d");
MultilibSet RISCVMultilibs =
MultilibSet()
.Either({Ilp32, Ilp32f, Ilp32d, Lp64, Lp64f, Lp64d})
.FilterOut(NonExistent);
Multilib::flags_list Flags;
bool IsRV64 = TargetTriple.getArch() == llvm::Triple::riscv64;
StringRef ABIName = tools::riscv::getRISCVABI(Args, TargetTriple);
addMultilibFlag(!IsRV64, "m32", Flags);
addMultilibFlag(IsRV64, "m64", Flags);
addMultilibFlag(ABIName == "ilp32", "mabi=ilp32", Flags);
addMultilibFlag(ABIName == "ilp32f", "mabi=ilp32f", Flags);
addMultilibFlag(ABIName == "ilp32d", "mabi=ilp32d", Flags);
addMultilibFlag(ABIName == "lp64", "mabi=lp64", Flags);
addMultilibFlag(ABIName == "lp64f", "mabi=lp64f", Flags);
addMultilibFlag(ABIName == "lp64d", "mabi=lp64d", Flags);
if (RISCVMultilibs.select(Flags, Result.SelectedMultilib))
Result.Multilibs = RISCVMultilibs;
}
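// Selection sketch: a riscv64 triple with -mabi=lp64d produces the flags
// +m64 and +mabi=lp64d, so the "lib64/lp64d" multilib is chosen, provided
// crtbegin.o exists under that suffix (the FilterOut above).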
static bool findBiarchMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef Path, const ArgList &Args,
bool NeedsBiarchSuffix,
DetectedMultilibs &Result) {
Multilib Default;
// Some versions of SUSE and Fedora on ppc64 put 32-bit libs
// in what would normally be GCCInstallPath and put the 64-bit
// libs in a subdirectory named 64. The simple logic we follow is that
// *if* there is a subdirectory of the right name with crtbegin.o in it,
// we use that. If not, and if not a biarch triple alias, we look for
// crtbegin.o without the subdirectory.
StringRef Suff64 = "/64";
// Solaris uses platform-specific suffixes instead of /64.
if (TargetTriple.getOS() == llvm::Triple::Solaris) {
switch (TargetTriple.getArch()) {
case llvm::Triple::x86:
case llvm::Triple::x86_64:
Suff64 = "/amd64";
break;
case llvm::Triple::sparc:
case llvm::Triple::sparcv9:
Suff64 = "/sparcv9";
break;
default:
break;
}
}
Multilib Alt64 = Multilib()
.gccSuffix(Suff64)
.includeSuffix(Suff64)
.flag("-m32")
.flag("+m64")
.flag("-mx32");
Multilib Alt32 = Multilib()
.gccSuffix("/32")
.includeSuffix("/32")
.flag("+m32")
.flag("-m64")
.flag("-mx32");
Multilib Altx32 = Multilib()
.gccSuffix("/x32")
.includeSuffix("/x32")
.flag("-m32")
.flag("-m64")
.flag("+mx32");
// GCC toolchain for IAMCU doesn't have crtbegin.o, so look for libgcc.a.
FilterNonExistent NonExistent(
Path, TargetTriple.isOSIAMCU() ? "/libgcc.a" : "/crtbegin.o", D.getVFS());
// Determine default multilib from: 32, 64, x32
// Also handle cases such as 64 on 32, 32 on 64, etc.
enum { UNKNOWN, WANT32, WANT64, WANTX32 } Want = UNKNOWN;
const bool IsX32 = TargetTriple.getEnvironment() == llvm::Triple::GNUX32;
if (TargetTriple.isArch32Bit() && !NonExistent(Alt32))
Want = WANT64;
else if (TargetTriple.isArch64Bit() && IsX32 && !NonExistent(Altx32))
Want = WANT64;
else if (TargetTriple.isArch64Bit() && !IsX32 && !NonExistent(Alt64))
Want = WANT32;
else {
if (TargetTriple.isArch32Bit())
Want = NeedsBiarchSuffix ? WANT64 : WANT32;
else if (IsX32)
Want = NeedsBiarchSuffix ? WANT64 : WANTX32;
else
Want = NeedsBiarchSuffix ? WANT32 : WANT64;
}
if (Want == WANT32)
Default.flag("+m32").flag("-m64").flag("-mx32");
else if (Want == WANT64)
Default.flag("-m32").flag("+m64").flag("-mx32");
else if (Want == WANTX32)
Default.flag("-m32").flag("-m64").flag("+mx32");
else
return false;
Result.Multilibs.push_back(Default);
Result.Multilibs.push_back(Alt64);
Result.Multilibs.push_back(Alt32);
Result.Multilibs.push_back(Altx32);
Result.Multilibs.FilterOut(NonExistent);
Multilib::flags_list Flags;
addMultilibFlag(TargetTriple.isArch64Bit() && !IsX32, "m64", Flags);
addMultilibFlag(TargetTriple.isArch32Bit(), "m32", Flags);
addMultilibFlag(TargetTriple.isArch64Bit() && IsX32, "mx32", Flags);
if (!Result.Multilibs.select(Flags, Result.SelectedMultilib))
return false;
if (Result.SelectedMultilib == Alt64 || Result.SelectedMultilib == Alt32 ||
Result.SelectedMultilib == Altx32)
Result.BiarchSibling = Default;
return true;
}
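// Worked example (illustrative) of the biarch logic above: with a 32-bit
// x86 triple (e.g. after -m32) probing a 64-bit GCC install that has a
// "32" subdirectory containing crtbegin.o, Alt32 survives the NonExistent
// filter, so Want becomes WANT64 and Default is tagged "+m64"/"-m32"/"-mx32".
// The computed Flags are "+m32"/"-m64"/"-mx32", select() therefore picks
// Alt32, and Default is recorded as its BiarchSibling.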
/// Generic_GCC - A tool chain using the 'gcc' command to perform
/// all subcommands; this relies on gcc translating the majority of
/// command line options.
/// Less-than for GCCVersion, implementing a Strict Weak Ordering.
bool Generic_GCC::GCCVersion::isOlderThan(int RHSMajor, int RHSMinor,
int RHSPatch,
StringRef RHSPatchSuffix) const {
if (Major != RHSMajor)
return Major < RHSMajor;
if (Minor != RHSMinor)
return Minor < RHSMinor;
if (Patch != RHSPatch) {
// Note that versions without a specified patch sort higher than those with
// a patch.
if (RHSPatch == -1)
return true;
if (Patch == -1)
return false;
// Otherwise just sort on the patch itself.
return Patch < RHSPatch;
}
if (PatchSuffix != RHSPatchSuffix) {
// Sort empty suffixes higher.
if (RHSPatchSuffix.empty())
return true;
if (PatchSuffix.empty())
return false;
// Provide a lexicographic sort to make this a total ordering.
return PatchSuffix < RHSPatchSuffix;
}
// The versions are equal.
return false;
}
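// Ordering examples implied by the comparison above (illustrative):
//   4.8.5     is older than 4.9    (minor version differs)
//   4.9.2     is older than 4.9    (a version without a patch sorts higher)
//   4.9.2-rc4 is older than 4.9.2  (an empty patch suffix sorts higher)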
/// Parse a GCCVersion object out of a string of text.
///
/// This is the primary means of forming GCCVersion objects.
/*static*/
Generic_GCC::GCCVersion Generic_GCC::GCCVersion::Parse(StringRef VersionText) {
const GCCVersion BadVersion = {VersionText.str(), -1, -1, -1, "", "", ""};
std::pair<StringRef, StringRef> First = VersionText.split('.');
std::pair<StringRef, StringRef> Second = First.second.split('.');
GCCVersion GoodVersion = {VersionText.str(), -1, -1, -1, "", "", ""};
if (First.first.getAsInteger(10, GoodVersion.Major) || GoodVersion.Major < 0)
return BadVersion;
GoodVersion.MajorStr = First.first.str();
if (First.second.empty())
return GoodVersion;
StringRef MinorStr = Second.first;
if (Second.second.empty()) {
if (size_t EndNumber = MinorStr.find_first_not_of("0123456789")) {
GoodVersion.PatchSuffix = MinorStr.substr(EndNumber);
MinorStr = MinorStr.slice(0, EndNumber);
}
}
if (MinorStr.getAsInteger(10, GoodVersion.Minor) || GoodVersion.Minor < 0)
return BadVersion;
GoodVersion.MinorStr = MinorStr.str();
// First look for a number prefix and parse that if present. Otherwise just
// stash the entire patch string in the suffix, and leave the number
unspecified. This covers version strings such as:
// 5 (handled above)
// 4.4
// 4.4-patched
// 4.4.0
// 4.4.x
// 4.4.2-rc4
// 4.4.x-patched
// And retains any patch number it finds.
StringRef PatchText = Second.second;
if (!PatchText.empty()) {
if (size_t EndNumber = PatchText.find_first_not_of("0123456789")) {
// Try to parse the number and any suffix.
if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
GoodVersion.Patch < 0)
return BadVersion;
GoodVersion.PatchSuffix = PatchText.substr(EndNumber);
}
}
return GoodVersion;
}
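// Parse examples traced from the logic above (illustrative):
//   "4.4"         -> Major=4, Minor=4, Patch=-1, PatchSuffix=""
//   "4.4-patched" -> Major=4, Minor=4, Patch=-1, PatchSuffix="-patched"
//   "4.4.0"       -> Major=4, Minor=4, Patch=0,  PatchSuffix=""
//   "4.4.2-rc4"   -> Major=4, Minor=4, Patch=2,  PatchSuffix="-rc4"
//   "banana"      -> BadVersion (the major component fails to parse)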
static llvm::StringRef getGCCToolchainDir(const ArgList &Args) {
const Arg *A = Args.getLastArg(clang::driver::options::OPT_gcc_toolchain);
if (A)
return A->getValue();
return GCC_INSTALL_PREFIX;
}
/// Initialize a GCCInstallationDetector from the driver.
///
/// This performs all of the autodetection and sets up the various paths.
/// Once constructed, a GCCInstallationDetector is essentially immutable.
///
/// FIXME: We shouldn't need an explicit TargetTriple parameter here, and
/// should instead pull the target out of the driver. This is currently
/// necessary because the driver doesn't store the final version of the target
/// triple.
void Generic_GCC::GCCInstallationDetector::init(
const llvm::Triple &TargetTriple, const ArgList &Args,
ArrayRef<std::string> ExtraTripleAliases) {
llvm::Triple BiarchVariantTriple = TargetTriple.isArch32Bit()
? TargetTriple.get64BitArchVariant()
: TargetTriple.get32BitArchVariant();
// The library directories which may contain GCC installations.
SmallVector<StringRef, 4> CandidateLibDirs, CandidateBiarchLibDirs;
// The compatible GCC triples for this particular architecture.
SmallVector<StringRef, 16> CandidateTripleAliases;
SmallVector<StringRef, 16> CandidateBiarchTripleAliases;
CollectLibDirsAndTriples(TargetTriple, BiarchVariantTriple, CandidateLibDirs,
CandidateTripleAliases, CandidateBiarchLibDirs,
CandidateBiarchTripleAliases);
// Compute the set of prefixes for our search.
SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
D.PrefixDirs.end());
StringRef GCCToolchainDir = getGCCToolchainDir(Args);
if (GCCToolchainDir != "") {
if (GCCToolchainDir.back() == '/')
GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
Prefixes.push_back(GCCToolchainDir);
} else {
// If we have a SysRoot, try that first.
if (!D.SysRoot.empty()) {
Prefixes.push_back(D.SysRoot);
AddDefaultGCCPrefixes(TargetTriple, Prefixes, D.SysRoot);
}
// Then look for gcc installed alongside clang.
Prefixes.push_back(D.InstalledDir + "/..");
// Next, look for prefix(es) that correspond to distribution-supplied gcc
// installations.
if (D.SysRoot.empty()) {
// Typically /usr.
AddDefaultGCCPrefixes(TargetTriple, Prefixes, D.SysRoot);
}
}
// Try to respect gcc-config on Gentoo. However, do that only
// if --gcc-toolchain is not provided or equal to the Gentoo install
// in /usr. This avoids accidentally enforcing the system GCC version
// when using a custom toolchain.
if (GCCToolchainDir == "" || GCCToolchainDir == D.SysRoot + "/usr") {
SmallVector<StringRef, 16> GentooTestTriples;
// Try to match an exact triple as target triple first.
// e.g. crossdev -S x86_64-gentoo-linux-gnu will install gcc libs for
// x86_64-gentoo-linux-gnu. But "clang -target x86_64-gentoo-linux-gnu"
// may pick the libraries for x86_64-pc-linux-gnu even when exact matching
// triple x86_64-gentoo-linux-gnu is present.
GentooTestTriples.push_back(TargetTriple.str());
// Check rest of triples.
GentooTestTriples.append(ExtraTripleAliases.begin(),
ExtraTripleAliases.end());
GentooTestTriples.append(CandidateTripleAliases.begin(),
CandidateTripleAliases.end());
if (ScanGentooConfigs(TargetTriple, Args, GentooTestTriples,
CandidateBiarchTripleAliases))
return;
}
// Loop over the various components which exist and select the best GCC
// installation available. GCC installs are ranked by version number.
Version = GCCVersion::Parse("0.0.0");
for (const std::string &Prefix : Prefixes) {
if (!D.getVFS().exists(Prefix))
continue;
for (StringRef Suffix : CandidateLibDirs) {
const std::string LibDir = Prefix + Suffix.str();
if (!D.getVFS().exists(LibDir))
continue;
// Try to match the exact target triple first.
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, TargetTriple.str());
// Try rest of possible triples.
for (StringRef Candidate : ExtraTripleAliases) // Try these first.
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
for (StringRef Candidate : CandidateTripleAliases)
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate);
}
for (StringRef Suffix : CandidateBiarchLibDirs) {
const std::string LibDir = Prefix + Suffix.str();
if (!D.getVFS().exists(LibDir))
continue;
for (StringRef Candidate : CandidateBiarchTripleAliases)
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate,
/*NeedsBiarchSuffix=*/ true);
}
}
}
void Generic_GCC::GCCInstallationDetector::print(raw_ostream &OS) const {
for (const auto &InstallPath : CandidateGCCInstallPaths)
OS << "Found candidate GCC installation: " << InstallPath << "\n";
if (!GCCInstallPath.empty())
OS << "Selected GCC installation: " << GCCInstallPath << "\n";
for (const auto &Multilib : Multilibs)
OS << "Candidate multilib: " << Multilib << "\n";
if (Multilibs.size() != 0 || !SelectedMultilib.isDefault())
OS << "Selected multilib: " << SelectedMultilib << "\n";
}
bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
if (BiarchSibling.hasValue()) {
M = BiarchSibling.getValue();
return true;
}
return false;
}
void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
const llvm::Triple &TargetTriple, SmallVectorImpl<std::string> &Prefixes,
StringRef SysRoot) {
if (TargetTriple.getOS() == llvm::Triple::Solaris) {
// Solaris is a special case.
// The GCC installation is under
// /usr/gcc/<major>.<minor>/lib/gcc/<triple>/<major>.<minor>.<patch>/
// so we need to find those /usr/gcc/*/lib/gcc libdirs and go with
// /usr/gcc/<version> as a prefix.
std::string PrefixDir = SysRoot.str() + "/usr/gcc";
std::error_code EC;
for (vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC), LE;
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef VersionText = llvm::sys::path::filename(LI->getName());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
// Filter out obviously bad entries.
if (CandidateVersion.Major == -1 || CandidateVersion.isOlderThan(4, 1, 1))
continue;
std::string CandidatePrefix = PrefixDir + "/" + VersionText.str();
std::string CandidateLibPath = CandidatePrefix + "/lib/gcc";
if (!D.getVFS().exists(CandidateLibPath))
continue;
Prefixes.push_back(CandidatePrefix);
}
return;
}
// Non-Solaris is much simpler - most systems just go with "/usr".
if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
// Yet, still look for RHEL devtoolsets.
Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-4/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-3/root/usr");
Prefixes.push_back("/opt/rh/devtoolset-2/root/usr");
}
Prefixes.push_back(SysRoot.str() + "/usr");
}
/*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
const llvm::Triple &TargetTriple, const llvm::Triple &BiarchTriple,
SmallVectorImpl<StringRef> &LibDirs,
SmallVectorImpl<StringRef> &TripleAliases,
SmallVectorImpl<StringRef> &BiarchLibDirs,
SmallVectorImpl<StringRef> &BiarchTripleAliases) {
// Declare a bunch of static data sets that we'll select between below. These
// are specifically designed to always refer to string literals to avoid any
// lifetime or initialization issues.
static const char *const AArch64LibDirs[] = {"/lib64", "/lib"};
static const char *const AArch64Triples[] = {
"aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux",
"aarch64-suse-linux"};
static const char *const AArch64beLibDirs[] = {"/lib"};
static const char *const AArch64beTriples[] = {"aarch64_be-none-linux-gnu",
"aarch64_be-linux-gnu"};
static const char *const ARMLibDirs[] = {"/lib"};
static const char *const ARMTriples[] = {"arm-linux-gnueabi"};
static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf",
"armv7hl-redhat-linux-gnueabi",
"armv6hl-suse-linux-gnueabi",
"armv7hl-suse-linux-gnueabi"};
static const char *const ARMebLibDirs[] = {"/lib"};
static const char *const ARMebTriples[] = {"armeb-linux-gnueabi"};
static const char *const ARMebHFTriples[] = {
"armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi"};
static const char *const X86_64LibDirs[] = {"/lib64", "/lib"};
static const char *const X86_64Triples[] = {
"x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
"x86_64-pc-linux-gnu", "x86_64-redhat-linux6E",
"x86_64-redhat-linux", "x86_64-suse-linux",
"x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
"x86_64-slackware-linux", "x86_64-unknown-linux",
"x86_64-amazon-linux"};
static const char *const X32LibDirs[] = {"/libx32"};
static const char *const X86LibDirs[] = {"/lib32", "/lib"};
static const char *const X86Triples[] = {
"i686-linux-gnu", "i686-pc-linux-gnu", "i486-linux-gnu",
"i386-linux-gnu", "i386-redhat-linux6E", "i686-redhat-linux",
"i586-redhat-linux", "i386-redhat-linux", "i586-suse-linux",
"i486-slackware-linux", "i686-montavista-linux", "i586-linux-gnu"};
static const char *const MIPSLibDirs[] = {"/lib"};
static const char *const MIPSTriples[] = {"mips-linux-gnu", "mips-mti-linux",
"mips-mti-linux-gnu",
"mips-img-linux-gnu"};
static const char *const MIPSELLibDirs[] = {"/lib"};
static const char *const MIPSELTriples[] = {"mipsel-linux-gnu",
"mips-img-linux-gnu"};
static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64Triples[] = {
"mips64-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
"mips64-linux-gnuabi64"};
static const char *const MIPS64ELLibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64ELTriples[] = {
"mips64el-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
"mips64el-linux-gnuabi64"};
static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
static const char *const PPCTriples[] = {
"powerpc-linux-gnu", "powerpc-unknown-linux-gnu", "powerpc-linux-gnuspe",
"powerpc-suse-linux", "powerpc-montavista-linuxspe"};
static const char *const PPC64LibDirs[] = {"/lib64", "/lib"};
static const char *const PPC64Triples[] = {
"powerpc64-linux-gnu", "powerpc64-unknown-linux-gnu",
"powerpc64-suse-linux", "ppc64-redhat-linux"};
static const char *const PPC64LELibDirs[] = {"/lib64", "/lib"};
static const char *const PPC64LETriples[] = {
"powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
"powerpc64le-suse-linux", "ppc64le-redhat-linux"};
static const char *const RISCV32LibDirs[] = {"/lib", "/lib32"};
static const char *const RISCVTriples[] = {"riscv32-unknown-linux-gnu",
"riscv64-unknown-linux-gnu",
"riscv32-unknown-elf"};
static const char *const SPARCv8LibDirs[] = {"/lib32", "/lib"};
static const char *const SPARCv8Triples[] = {"sparc-linux-gnu",
"sparcv8-linux-gnu"};
static const char *const SPARCv9LibDirs[] = {"/lib64", "/lib"};
static const char *const SPARCv9Triples[] = {"sparc64-linux-gnu",
"sparcv9-linux-gnu"};
static const char *const SystemZLibDirs[] = {"/lib64", "/lib"};
static const char *const SystemZTriples[] = {
"s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
"s390x-suse-linux", "s390x-redhat-linux"};
using std::begin;
using std::end;
if (TargetTriple.getOS() == llvm::Triple::Solaris) {
static const char *const SolarisLibDirs[] = {"/lib"};
static const char *const SolarisSparcV8Triples[] = {
"sparc-sun-solaris2.11", "sparc-sun-solaris2.12"};
static const char *const SolarisSparcV9Triples[] = {
"sparcv9-sun-solaris2.11", "sparcv9-sun-solaris2.12"};
static const char *const SolarisX86Triples[] = {"i386-pc-solaris2.11",
"i386-pc-solaris2.12"};
static const char *const SolarisX86_64Triples[] = {"x86_64-pc-solaris2.11",
"x86_64-pc-solaris2.12"};
LibDirs.append(begin(SolarisLibDirs), end(SolarisLibDirs));
BiarchLibDirs.append(begin(SolarisLibDirs), end(SolarisLibDirs));
switch (TargetTriple.getArch()) {
case llvm::Triple::x86:
TripleAliases.append(begin(SolarisX86Triples), end(SolarisX86Triples));
BiarchTripleAliases.append(begin(SolarisX86_64Triples),
end(SolarisX86_64Triples));
break;
case llvm::Triple::x86_64:
TripleAliases.append(begin(SolarisX86_64Triples),
end(SolarisX86_64Triples));
BiarchTripleAliases.append(begin(SolarisX86Triples),
end(SolarisX86Triples));
break;
case llvm::Triple::sparc:
TripleAliases.append(begin(SolarisSparcV8Triples),
end(SolarisSparcV8Triples));
BiarchTripleAliases.append(begin(SolarisSparcV9Triples),
end(SolarisSparcV9Triples));
break;
case llvm::Triple::sparcv9:
TripleAliases.append(begin(SolarisSparcV9Triples),
end(SolarisSparcV9Triples));
BiarchTripleAliases.append(begin(SolarisSparcV8Triples),
end(SolarisSparcV8Triples));
break;
default:
break;
}
return;
}
// Android targets should not use GNU/Linux tools or libraries.
if (TargetTriple.isAndroid()) {
static const char *const AArch64AndroidTriples[] = {
"aarch64-linux-android"};
static const char *const ARMAndroidTriples[] = {"arm-linux-androideabi"};
static const char *const MIPSELAndroidTriples[] = {"mipsel-linux-android"};
static const char *const MIPS64ELAndroidTriples[] = {
"mips64el-linux-android"};
static const char *const X86AndroidTriples[] = {"i686-linux-android"};
static const char *const X86_64AndroidTriples[] = {"x86_64-linux-android"};
switch (TargetTriple.getArch()) {
case llvm::Triple::aarch64:
LibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
TripleAliases.append(begin(AArch64AndroidTriples),
end(AArch64AndroidTriples));
break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
TripleAliases.append(begin(ARMAndroidTriples), end(ARMAndroidTriples));
break;
case llvm::Triple::mipsel:
LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
TripleAliases.append(begin(MIPSELAndroidTriples),
end(MIPSELAndroidTriples));
BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
BiarchTripleAliases.append(begin(MIPS64ELAndroidTriples),
end(MIPS64ELAndroidTriples));
break;
case llvm::Triple::mips64el:
LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
TripleAliases.append(begin(MIPS64ELAndroidTriples),
end(MIPS64ELAndroidTriples));
BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
BiarchTripleAliases.append(begin(MIPSELAndroidTriples),
end(MIPSELAndroidTriples));
break;
case llvm::Triple::x86_64:
LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
TripleAliases.append(begin(X86_64AndroidTriples),
end(X86_64AndroidTriples));
BiarchLibDirs.append(begin(X86LibDirs), end(X86LibDirs));
BiarchTripleAliases.append(begin(X86AndroidTriples),
end(X86AndroidTriples));
break;
case llvm::Triple::x86:
LibDirs.append(begin(X86LibDirs), end(X86LibDirs));
TripleAliases.append(begin(X86AndroidTriples), end(X86AndroidTriples));
BiarchLibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
BiarchTripleAliases.append(begin(X86_64AndroidTriples),
end(X86_64AndroidTriples));
break;
default:
break;
}
return;
}
switch (TargetTriple.getArch()) {
case llvm::Triple::aarch64:
LibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
TripleAliases.append(begin(AArch64Triples), end(AArch64Triples));
BiarchLibDirs.append(begin(AArch64LibDirs), end(AArch64LibDirs));
BiarchTripleAliases.append(begin(AArch64Triples), end(AArch64Triples));
break;
case llvm::Triple::aarch64_be:
LibDirs.append(begin(AArch64beLibDirs), end(AArch64beLibDirs));
TripleAliases.append(begin(AArch64beTriples), end(AArch64beTriples));
BiarchLibDirs.append(begin(AArch64beLibDirs), end(AArch64beLibDirs));
BiarchTripleAliases.append(begin(AArch64beTriples), end(AArch64beTriples));
break;
case llvm::Triple::arm:
case llvm::Triple::thumb:
LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
TripleAliases.append(begin(ARMHFTriples), end(ARMHFTriples));
} else {
TripleAliases.append(begin(ARMTriples), end(ARMTriples));
}
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
LibDirs.append(begin(ARMebLibDirs), end(ARMebLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
TripleAliases.append(begin(ARMebHFTriples), end(ARMebHFTriples));
} else {
TripleAliases.append(begin(ARMebTriples), end(ARMebTriples));
}
break;
case llvm::Triple::x86_64:
LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
TripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
// x32 is always available when x86_64 is available, so add it as a
// secondary arch, reusing the x86_64 triples.
if (TargetTriple.getEnvironment() == llvm::Triple::GNUX32) {
BiarchLibDirs.append(begin(X32LibDirs), end(X32LibDirs));
BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
} else {
BiarchLibDirs.append(begin(X86LibDirs), end(X86LibDirs));
BiarchTripleAliases.append(begin(X86Triples), end(X86Triples));
}
break;
case llvm::Triple::x86:
LibDirs.append(begin(X86LibDirs), end(X86LibDirs));
// MCU toolchain is 32 bit only and its triple alias is TargetTriple
// itself, which will be appended below.
if (!TargetTriple.isOSIAMCU()) {
TripleAliases.append(begin(X86Triples), end(X86Triples));
BiarchLibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
}
break;
case llvm::Triple::mips:
LibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
BiarchLibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
BiarchTripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
break;
case llvm::Triple::mipsel:
LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
TripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
break;
case llvm::Triple::mips64:
LibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
TripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
BiarchLibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::mips64el:
LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::ppc:
LibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
TripleAliases.append(begin(PPCTriples), end(PPCTriples));
BiarchLibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
BiarchTripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
break;
case llvm::Triple::ppc64:
LibDirs.append(begin(PPC64LibDirs), end(PPC64LibDirs));
TripleAliases.append(begin(PPC64Triples), end(PPC64Triples));
BiarchLibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
BiarchTripleAliases.append(begin(PPCTriples), end(PPCTriples));
break;
case llvm::Triple::ppc64le:
LibDirs.append(begin(PPC64LELibDirs), end(PPC64LELibDirs));
TripleAliases.append(begin(PPC64LETriples), end(PPC64LETriples));
break;
case llvm::Triple::riscv32:
LibDirs.append(begin(RISCV32LibDirs), end(RISCV32LibDirs));
BiarchLibDirs.append(begin(RISCV32LibDirs), end(RISCV32LibDirs));
TripleAliases.append(begin(RISCVTriples), end(RISCVTriples));
BiarchTripleAliases.append(begin(RISCVTriples), end(RISCVTriples));
break;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
LibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
TripleAliases.append(begin(SPARCv8Triples), end(SPARCv8Triples));
BiarchLibDirs.append(begin(SPARCv9LibDirs), end(SPARCv9LibDirs));
BiarchTripleAliases.append(begin(SPARCv9Triples), end(SPARCv9Triples));
break;
case llvm::Triple::sparcv9:
LibDirs.append(begin(SPARCv9LibDirs), end(SPARCv9LibDirs));
TripleAliases.append(begin(SPARCv9Triples), end(SPARCv9Triples));
BiarchLibDirs.append(begin(SPARCv8LibDirs), end(SPARCv8LibDirs));
BiarchTripleAliases.append(begin(SPARCv8Triples), end(SPARCv8Triples));
break;
case llvm::Triple::systemz:
LibDirs.append(begin(SystemZLibDirs), end(SystemZLibDirs));
TripleAliases.append(begin(SystemZTriples), end(SystemZTriples));
break;
default:
// By default, just rely on the standard lib directories and the original
// triple.
break;
}
// Always append the driver's target triple to the end, in case it doesn't
// match any of our aliases.
TripleAliases.push_back(TargetTriple.str());
// Also include the multiarch variant if it's different.
if (TargetTriple.str() != BiarchTriple.str())
BiarchTripleAliases.push_back(BiarchTriple.str());
}
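// Worked example (illustrative): for a plain x86_64-unknown-linux-gnu target
// the switch above yields LibDirs = {"/lib64", "/lib"} with TripleAliases
// drawn from X86_64Triples, and (absent a GNUX32 environment) BiarchLibDirs =
// {"/lib32", "/lib"} with BiarchTripleAliases drawn from X86Triples; the
// target triple itself and its 32-bit biarch variant are then appended last.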
bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
const llvm::Triple &TargetTriple, const ArgList &Args,
StringRef Path, bool NeedsBiarchSuffix) {
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
DetectedMultilibs Detected;
// Android standalone toolchain could have multilibs for ARM and Thumb.
// Debian mips multilibs behave more like the rest of the biarch ones,
// so handle them there
if (isArmOrThumbArch(TargetArch) && TargetTriple.isAndroid()) {
// It should also work without multilibs in a simplified toolchain.
findAndroidArmMultilibs(D, TargetTriple, Path, Args, Detected);
} else if (TargetTriple.isMIPS()) {
if (!findMIPSMultilibs(D, TargetTriple, Path, Args, Detected))
return false;
} else if (isRISCV(TargetArch)) {
findRISCVMultilibs(D, TargetTriple, Path, Args, Detected);
} else if (!findBiarchMultilibs(D, TargetTriple, Path, Args,
NeedsBiarchSuffix, Detected)) {
return false;
}
Multilibs = Detected.Multilibs;
SelectedMultilib = Detected.SelectedMultilib;
BiarchSibling = Detected.BiarchSibling;
return true;
}
void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const llvm::Triple &TargetTriple, const ArgList &Args,
const std::string &LibDir, StringRef CandidateTriple,
bool NeedsBiarchSuffix) {
llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
// Locations relative to the system lib directory where GCC's triple-specific
// directories might reside.
struct GCCLibSuffix {
// Path from system lib directory to GCC triple-specific directory.
std::string LibSuffix;
// Path from GCC triple-specific directory back to system lib directory.
// This is one '..' component per component in LibSuffix.
StringRef ReversePath;
// Whether this library suffix is relevant for the triple.
bool Active;
} Suffixes[] = {
// This is the normal place.
{"gcc/" + CandidateTriple.str(), "../..", true},
// Debian puts cross-compilers in gcc-cross.
{"gcc-cross/" + CandidateTriple.str(), "../..",
TargetTriple.getOS() != llvm::Triple::Solaris},
// The Freescale PPC SDK has the gcc libraries in
// <sysroot>/usr/lib/<triple>/x.y.z so have a look there as well. Only do
// this on Freescale triples, though, since some systems put a *lot* of
// files in that location, not just GCC installation data.
{CandidateTriple.str(), "..",
TargetTriple.getVendor() == llvm::Triple::Freescale ||
TargetTriple.getVendor() == llvm::Triple::OpenEmbedded},
// Natively multiarch systems sometimes put the GCC triple-specific
// directory within their multiarch lib directory, resulting in the
// triple appearing twice.
{CandidateTriple.str() + "/gcc/" + CandidateTriple.str(), "../../..",
TargetTriple.getOS() != llvm::Triple::Solaris},
// Deal with cases (on Ubuntu) where the system architecture could be i386
// but the GCC target architecture could be (say) i686.
// FIXME: It may be worthwhile to generalize this and look for a second
// triple.
{"i386-linux-gnu/gcc/" + CandidateTriple.str(), "../../..",
(TargetArch == llvm::Triple::x86 &&
TargetTriple.getOS() != llvm::Triple::Solaris)}};
for (auto &Suffix : Suffixes) {
if (!Suffix.Active)
continue;
StringRef LibSuffix = Suffix.LibSuffix;
std::error_code EC;
for (vfs::directory_iterator
LI = D.getVFS().dir_begin(LibDir + "/" + LibSuffix, EC),
LE;
!EC && LI != LE; LI = LI.increment(EC)) {
StringRef VersionText = llvm::sys::path::filename(LI->getName());
GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
if (CandidateVersion.Major != -1) // Filter obviously bad entries.
if (!CandidateGCCInstallPaths.insert(LI->getName()).second)
continue; // Saw this path before; no need to look at it again.
if (CandidateVersion.isOlderThan(4, 1, 1))
continue;
if (CandidateVersion <= Version)
continue;
if (!ScanGCCForMultilibs(TargetTriple, Args, LI->getName(),
NeedsBiarchSuffix))
continue;
Version = CandidateVersion;
GCCTriple.setTriple(CandidateTriple);
// FIXME: We hack together the directory name here instead of
// using LI to ensure stable path separators across Windows and
// Linux.
GCCInstallPath = (LibDir + "/" + LibSuffix + "/" + VersionText).str();
GCCParentLibPath = (GCCInstallPath + "/../" + Suffix.ReversePath).str();
IsValid = true;
}
}
}
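// Path example (illustrative): with LibDir = "/usr/lib", the "normal place"
// suffix "gcc/x86_64-linux-gnu" and a version directory "7", a successful
// scan sets
//   GCCInstallPath   = "/usr/lib/gcc/x86_64-linux-gnu/7"
//   GCCParentLibPath = "/usr/lib/gcc/x86_64-linux-gnu/7/../../.."  (/usr/lib)
// assuming the candidate passes the version and multilib checks above.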
bool Generic_GCC::GCCInstallationDetector::ScanGentooConfigs(
const llvm::Triple &TargetTriple, const ArgList &Args,
const SmallVectorImpl<StringRef> &CandidateTriples,
const SmallVectorImpl<StringRef> &CandidateBiarchTriples) {
for (StringRef CandidateTriple : CandidateTriples) {
if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple))
return true;
}
for (StringRef CandidateTriple : CandidateBiarchTriples) {
if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple, true))
return true;
}
return false;
}
bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
const llvm::Triple &TargetTriple, const ArgList &Args,
StringRef CandidateTriple, bool NeedsBiarchSuffix) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
D.getVFS().getBufferForFile(D.SysRoot + "/etc/env.d/gcc/config-" +
CandidateTriple.str());
if (File) {
SmallVector<StringRef, 2> Lines;
File.get()->getBuffer().split(Lines, "\n");
for (StringRef Line : Lines) {
Line = Line.trim();
// CURRENT=triple-version
if (!Line.consume_front("CURRENT="))
continue;
// Process the config file pointed to by CURRENT.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ConfigFile =
D.getVFS().getBufferForFile(D.SysRoot + "/etc/env.d/gcc/" +
Line.str());
std::pair<StringRef, StringRef> ActiveVersion = Line.rsplit('-');
// List of paths to scan for libraries.
SmallVector<StringRef, 4> GentooScanPaths;
// Scan the Config file to find installed GCC libraries path.
// Typical content of the GCC config file:
// LDPATH="/usr/lib/gcc/x86_64-pc-linux-gnu/4.9.x:/usr/lib/gcc/
// (continued from previous line) x86_64-pc-linux-gnu/4.9.x/32"
// MANPATH="/usr/share/gcc-data/x86_64-pc-linux-gnu/4.9.x/man"
// INFOPATH="/usr/share/gcc-data/x86_64-pc-linux-gnu/4.9.x/info"
// STDCXX_INCDIR="/usr/lib/gcc/x86_64-pc-linux-gnu/4.9.x/include/g++-v4"
// We are looking for the paths listed in LDPATH=... .
if (ConfigFile) {
SmallVector<StringRef, 2> ConfigLines;
ConfigFile.get()->getBuffer().split(ConfigLines, "\n");
for (StringRef ConfLine : ConfigLines) {
ConfLine = ConfLine.trim();
if (ConfLine.consume_front("LDPATH=")) {
// Drop '"' from front and back if present.
ConfLine.consume_back("\"");
ConfLine.consume_front("\"");
// Get all paths separated by ':'
ConfLine.split(GentooScanPaths, ':', -1, /*AllowEmpty*/ false);
}
}
}
// Test the path based on the version in /etc/env.d/gcc/config-{tuple}.
std::string basePath = "/usr/lib/gcc/" + ActiveVersion.first.str() + "/"
+ ActiveVersion.second.str();
GentooScanPaths.push_back(StringRef(basePath));
// Scan all paths for GCC libraries.
for (const auto &GentooScanPath : GentooScanPaths) {
std::string GentooPath = D.SysRoot + std::string(GentooScanPath);
if (D.getVFS().exists(GentooPath + "/crtbegin.o")) {
if (!ScanGCCForMultilibs(TargetTriple, Args, GentooPath,
NeedsBiarchSuffix))
continue;
Version = GCCVersion::Parse(ActiveVersion.second);
GCCInstallPath = GentooPath;
GCCParentLibPath = GentooPath + std::string("/../../..");
GCCTriple.setTriple(ActiveVersion.first);
IsValid = true;
return true;
}
}
}
}
return false;
}
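// Worked example (illustrative): if /etc/env.d/gcc/config-x86_64-pc-linux-gnu
// contains "CURRENT=x86_64-pc-linux-gnu-4.9.x", the rsplit('-') above yields
// ActiveVersion = ("x86_64-pc-linux-gnu", "4.9.x"); any LDPATH entries from
// the referenced config file are scanned first, then the fallback
// "/usr/lib/gcc/x86_64-pc-linux-gnu/4.9.x", and the first path (under the
// sysroot) containing crtbegin.o wins.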
Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), GCCInstallation(D),
CudaInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
}
Generic_GCC::~Generic_GCC() {}
Tool *Generic_GCC::getTool(Action::ActionClass AC) const {
switch (AC) {
case Action::PreprocessJobClass:
if (!Preprocess)
Preprocess.reset(new clang::driver::tools::gcc::Preprocessor(*this));
return Preprocess.get();
case Action::CompileJobClass:
if (!Compile)
Compile.reset(new tools::gcc::Compiler(*this));
return Compile.get();
default:
return ToolChain::getTool(AC);
}
}
Tool *Generic_GCC::buildAssembler() const {
return new tools::gnutools::Assembler(*this);
}
Tool *Generic_GCC::buildLinker() const { return new tools::gcc::Linker(*this); }
void Generic_GCC::printVerboseInfo(raw_ostream &OS) const {
// Print the information about how we detected the GCC installation.
GCCInstallation.print(OS);
CudaInstallation.print(OS);
}
bool Generic_GCC::IsUnwindTablesDefault(const ArgList &Args) const {
return getArch() == llvm::Triple::x86_64;
}
bool Generic_GCC::isPICDefault() const {
switch (getArch()) {
case llvm::Triple::x86_64:
return getTriple().isOSWindows();
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
return !getTriple().isOSBinFormatMachO() && !getTriple().isMacOSX();
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
return true;
default:
return false;
}
}
bool Generic_GCC::isPIEDefault() const { return false; }
bool Generic_GCC::isPICDefaultForced() const {
return getArch() == llvm::Triple::x86_64 && getTriple().isOSWindows();
}
bool Generic_GCC::IsIntegratedAssemblerDefault() const {
switch (getTriple().getArch()) {
case llvm::Triple::x86:
case llvm::Triple::x86_64:
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::avr:
case llvm::Triple::bpfel:
case llvm::Triple::bpfeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
case llvm::Triple::systemz:
case llvm::Triple::mips:
case llvm::Triple::mipsel:
return true;
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
// Enabled for Debian, Android, FreeBSD and OpenBSD mips64/mipsel, as they
// can precisely identify the ABI in use (Debian) or only use N64 for MIPS64
// (Android). Other targets are unable to distinguish N32 from N64.
if (getTriple().getEnvironment() == llvm::Triple::GNUABI64 ||
getTriple().isAndroid() ||
getTriple().isOSFreeBSD() ||
getTriple().isOSOpenBSD())
return true;
return false;
default:
return false;
}
}
void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx:
addLibCxxIncludePaths(DriverArgs, CC1Args);
break;
case ToolChain::CST_Libstdcxx:
addLibStdCxxIncludePaths(DriverArgs, CC1Args);
break;
}
}
void
Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
// FIXME: The Linux behavior would probably be a better approach here.
addSystemInclude(DriverArgs, CC1Args,
getDriver().SysRoot + "/usr/include/c++/v1");
}
void
Generic_GCC::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
// By default, we don't assume we know where libstdc++ might be installed.
// FIXME: If we have a valid GCCInstallation, use it.
}
/// Helper to add the variant paths of a libstdc++ installation.
bool Generic_GCC::addLibStdCXXIncludePaths(
Twine Base, Twine Suffix, StringRef GCCTriple, StringRef GCCMultiarchTriple,
StringRef TargetMultiarchTriple, Twine IncludeSuffix,
const ArgList &DriverArgs, ArgStringList &CC1Args) const {
if (!getVFS().exists(Base + Suffix))
return false;
addSystemInclude(DriverArgs, CC1Args, Base + Suffix);
// The vanilla GCC layout of libstdc++ headers uses a triple subdirectory. If
// that path exists or we have neither a GCC nor target multiarch triple, use
// this vanilla search path.
if ((GCCMultiarchTriple.empty() && TargetMultiarchTriple.empty()) ||
getVFS().exists(Base + Suffix + "/" + GCCTriple + IncludeSuffix)) {
addSystemInclude(DriverArgs, CC1Args,
Base + Suffix + "/" + GCCTriple + IncludeSuffix);
} else {
// Otherwise try to use multiarch naming schemes which have normalized the
// triples and put the triple before the suffix.
//
// GCC surprisingly uses *both* the GCC triple with a multilib suffix and
// the target triple, so we support that here.
addSystemInclude(DriverArgs, CC1Args,
Base + "/" + GCCMultiarchTriple + Suffix + IncludeSuffix);
addSystemInclude(DriverArgs, CC1Args,
Base + "/" + TargetMultiarchTriple + Suffix);
}
addSystemInclude(DriverArgs, CC1Args, Base + Suffix + "/backward");
return true;
}
llvm::opt::DerivedArgList *
Generic_GCC::TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef,
Action::OffloadKind DeviceOffloadKind) const {
// If this tool chain is used for an OpenMP offloading device we have to make
// sure we always generate a shared library regardless of the commands the
// user passed to the host. This is required because the runtime library
// is required to load the device image dynamically at run time.
if (DeviceOffloadKind == Action::OFK_OpenMP) {
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
const OptTable &Opts = getDriver().getOpts();
// Request the shared library. Given that these options are decided
// implicitly, they do not refer to any base argument.
DAL->AddFlagArg(/*BaseArg=*/nullptr, Opts.getOption(options::OPT_shared));
DAL->AddFlagArg(/*BaseArg=*/nullptr, Opts.getOption(options::OPT_fPIC));
// Filter out all the arguments we don't want to pass to the offloading
// toolchain, as they can interfere with the creation of a shared library.
for (auto *A : Args) {
switch ((options::ID)A->getOption().getID()) {
default:
DAL->append(A);
break;
case options::OPT_shared:
case options::OPT_dynamic:
case options::OPT_static:
case options::OPT_fPIC:
case options::OPT_fno_PIC:
case options::OPT_fpic:
case options::OPT_fno_pic:
case options::OPT_fPIE:
case options::OPT_fno_PIE:
case options::OPT_fpie:
case options::OPT_fno_pie:
break;
}
}
return DAL;
}
return nullptr;
}
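// Behavior sketch (illustrative): when building an OpenMP offload device
// image, a host command line such as
//   clang -fopenmp -static -fno-PIC foo.c
// is translated so that -static and -fno-PIC are dropped while -shared and
// -fPIC are force-added, guaranteeing the device image is a shared,
// position-independent library the runtime can load dynamically.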
void Generic_ELF::anchor() {}
void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
bool UseInitArrayDefault =
getTriple().getArch() == llvm::Triple::aarch64 ||
getTriple().getArch() == llvm::Triple::aarch64_be ||
(getTriple().getOS() == llvm::Triple::FreeBSD &&
getTriple().getOSMajorVersion() >= 12) ||
(getTriple().getOS() == llvm::Triple::Linux &&
((!GCCInstallation.isValid() || !V.isOlderThan(4, 7, 0)) ||
getTriple().isAndroid())) ||
getTriple().getOS() == llvm::Triple::NaCl ||
(getTriple().getVendor() == llvm::Triple::MipsTechnologies &&
!getTriple().hasEnvironment()) ||
getTriple().getOS() == llvm::Triple::Solaris;
if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array, UseInitArrayDefault))
CC1Args.push_back("-fuse-init-array");
}
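// Defaults example (illustrative): per the conditions above, -fuse-init-array
// is the default for aarch64/aarch64_be, FreeBSD 12 and later, NaCl, Solaris,
// Android, and Linux whenever no GCC installation older than 4.7.0 was
// detected; an explicit -fno-use-init-array still wins via hasFlag().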
Index: projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp (revision 340125)
@@ -1,17195 +1,17231 @@
//===--- SemaDecl.cpp - Semantic Analysis for Declarations ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for declarations.
//
//===----------------------------------------------------------------------===//
#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/CommentDiagnostic.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h" // TODO: Sema shouldn't depend on Lex
#include "clang/Lex/Lexer.h" // TODO: Extract static functions to fix layering.
#include "clang/Lex/ModuleLoader.h" // TODO: Sema shouldn't depend on Lex
#include "clang/Lex/Preprocessor.h" // Included for isCodeCompletionEnabled()
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Triple.h"
#include <algorithm>
#include <cstring>
#include <functional>
using namespace clang;
using namespace sema;
Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) {
if (OwnedType) {
Decl *Group[2] = { OwnedType, Ptr };
return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, 2));
}
return DeclGroupPtrTy::make(DeclGroupRef(Ptr));
}
namespace {
class TypeNameValidatorCCC : public CorrectionCandidateCallback {
public:
TypeNameValidatorCCC(bool AllowInvalid, bool WantClass = false,
bool AllowTemplates = false,
bool AllowNonTemplates = true)
: AllowInvalidDecl(AllowInvalid), WantClassName(WantClass),
AllowTemplates(AllowTemplates), AllowNonTemplates(AllowNonTemplates) {
WantExpressionKeywords = false;
WantCXXNamedCasts = false;
WantRemainingKeywords = false;
}
bool ValidateCandidate(const TypoCorrection &candidate) override {
if (NamedDecl *ND = candidate.getCorrectionDecl()) {
if (!AllowInvalidDecl && ND->isInvalidDecl())
return false;
if (getAsTypeTemplateDecl(ND))
return AllowTemplates;
bool IsType = isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
if (!IsType)
return false;
if (AllowNonTemplates)
return true;
// An injected-class-name of a class template (specialization) is valid
// as a template or as a non-template.
if (AllowTemplates) {
auto *RD = dyn_cast<CXXRecordDecl>(ND);
if (!RD || !RD->isInjectedClassName())
return false;
RD = cast<CXXRecordDecl>(RD->getDeclContext());
return RD->getDescribedClassTemplate() ||
isa<ClassTemplateSpecializationDecl>(RD);
}
return false;
}
return !WantClassName && candidate.isKeyword();
}
private:
bool AllowInvalidDecl;
bool WantClassName;
bool AllowTemplates;
bool AllowNonTemplates;
};
} // end anonymous namespace
/// Determine whether the token kind starts a simple-type-specifier.
bool Sema::isSimpleTypeSpecifier(tok::TokenKind Kind) const {
switch (Kind) {
// FIXME: Take into account the current language when deciding whether a
// token kind is a valid type specifier
case tok::kw_short:
case tok::kw_long:
case tok::kw___int64:
case tok::kw___int128:
case tok::kw_signed:
case tok::kw_unsigned:
case tok::kw_void:
case tok::kw_char:
case tok::kw_int:
case tok::kw_half:
case tok::kw_float:
case tok::kw_double:
case tok::kw__Float16:
case tok::kw___float128:
case tok::kw_wchar_t:
case tok::kw_bool:
case tok::kw___underlying_type:
case tok::kw___auto_type:
return true;
case tok::annot_typename:
case tok::kw_char16_t:
case tok::kw_char32_t:
case tok::kw_typeof:
case tok::annot_decltype:
case tok::kw_decltype:
return getLangOpts().CPlusPlus;
case tok::kw_char8_t:
return getLangOpts().Char8;
default:
break;
}
return false;
}
namespace {
enum class UnqualifiedTypeNameLookupResult {
NotFound,
FoundNonType,
FoundType
};
} // end anonymous namespace
/// Tries to perform unqualified lookup of the type decls in the bases of a
/// dependent class.
/// \return \a NotFound if no decls are found, \a FoundNonType if a non-type
/// decl is found, \a FoundType if only type decls are found.
static UnqualifiedTypeNameLookupResult
lookupUnqualifiedTypeNameInBase(Sema &S, const IdentifierInfo &II,
SourceLocation NameLoc,
const CXXRecordDecl *RD) {
if (!RD->hasDefinition())
return UnqualifiedTypeNameLookupResult::NotFound;
// Look for type decls in base classes.
UnqualifiedTypeNameLookupResult FoundTypeDecl =
UnqualifiedTypeNameLookupResult::NotFound;
for (const auto &Base : RD->bases()) {
const CXXRecordDecl *BaseRD = nullptr;
if (auto *BaseTT = Base.getType()->getAs<TagType>())
BaseRD = BaseTT->getAsCXXRecordDecl();
else if (auto *TST = Base.getType()->getAs<TemplateSpecializationType>()) {
// Look for type decls in dependent base classes that have known primary
// templates.
if (!TST || !TST->isDependentType())
continue;
auto *TD = TST->getTemplateName().getAsTemplateDecl();
if (!TD)
continue;
if (auto *BasePrimaryTemplate =
dyn_cast_or_null<CXXRecordDecl>(TD->getTemplatedDecl())) {
if (BasePrimaryTemplate->getCanonicalDecl() != RD->getCanonicalDecl())
BaseRD = BasePrimaryTemplate;
else if (auto *CTD = dyn_cast<ClassTemplateDecl>(TD)) {
if (const ClassTemplatePartialSpecializationDecl *PS =
CTD->findPartialSpecialization(Base.getType()))
if (PS->getCanonicalDecl() != RD->getCanonicalDecl())
BaseRD = PS;
}
}
}
if (BaseRD) {
for (NamedDecl *ND : BaseRD->lookup(&II)) {
if (!isa<TypeDecl>(ND))
return UnqualifiedTypeNameLookupResult::FoundNonType;
FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType;
}
if (FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound) {
switch (lookupUnqualifiedTypeNameInBase(S, II, NameLoc, BaseRD)) {
case UnqualifiedTypeNameLookupResult::FoundNonType:
return UnqualifiedTypeNameLookupResult::FoundNonType;
case UnqualifiedTypeNameLookupResult::FoundType:
FoundTypeDecl = UnqualifiedTypeNameLookupResult::FoundType;
break;
case UnqualifiedTypeNameLookupResult::NotFound:
break;
}
}
}
}
return FoundTypeDecl;
}
static ParsedType recoverFromTypeInKnownDependentBase(Sema &S,
const IdentifierInfo &II,
SourceLocation NameLoc) {
// Lookup in the parent class template context, if any.
const CXXRecordDecl *RD = nullptr;
UnqualifiedTypeNameLookupResult FoundTypeDecl =
UnqualifiedTypeNameLookupResult::NotFound;
for (DeclContext *DC = S.CurContext;
DC && FoundTypeDecl == UnqualifiedTypeNameLookupResult::NotFound;
DC = DC->getParent()) {
// Look for type decls in dependent base classes that have known primary
// templates.
RD = dyn_cast<CXXRecordDecl>(DC);
if (RD && RD->getDescribedClassTemplate())
FoundTypeDecl = lookupUnqualifiedTypeNameInBase(S, II, NameLoc, RD);
}
if (FoundTypeDecl != UnqualifiedTypeNameLookupResult::FoundType)
return nullptr;
// We found some types in dependent base classes. Recover as if the user
// wrote 'typename MyClass::II' instead of 'II'. We'll fully resolve the
// lookup during template instantiation.
S.Diag(NameLoc, diag::ext_found_via_dependent_bases_lookup) << &II;
ASTContext &Context = S.Context;
auto *NNS = NestedNameSpecifier::Create(Context, nullptr, false,
cast<Type>(Context.getRecordType(RD)));
QualType T = Context.getDependentNameType(ETK_Typename, NNS, &II);
CXXScopeSpec SS;
SS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
TypeLocBuilder Builder;
DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T);
DepTL.setNameLoc(NameLoc);
DepTL.setElaboratedKeywordLoc(SourceLocation());
DepTL.setQualifierLoc(SS.getWithLocInContext(Context));
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
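// Source-level example (illustrative) of the recovery above, under
// -fms-compatibility:
//
//   template <typename T> struct Base { typedef int II; };
//   template <typename T> struct Derived : Base<T> {
//     void f() { II x; }  // standard C++ requires 'typename Base<T>::II'
//   };
//
// The unqualified 'II' is recovered as a DependentNameType naming
// 'typename Derived::II', ext_found_via_dependent_bases_lookup is diagnosed,
// and the real lookup happens at template instantiation time.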
/// If the identifier refers to a type name within this scope,
/// return the declaration of that type.
///
/// This routine performs ordinary name lookup of the identifier II
/// within the given scope, with optional C++ scope specifier SS, to
/// determine whether the name refers to a type. If so, returns an
/// opaque pointer (actually a QualType) corresponding to that
/// type. Otherwise, returns NULL.
ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS,
bool isClassName, bool HasTrailingDot,
ParsedType ObjectTypePtr,
bool IsCtorOrDtorName,
bool WantNontrivialTypeSourceInfo,
bool IsClassTemplateDeductionContext,
IdentifierInfo **CorrectedII) {
// FIXME: Consider allowing this outside C++1z mode as an extension.
bool AllowDeducedTemplate = IsClassTemplateDeductionContext &&
getLangOpts().CPlusPlus17 && !IsCtorOrDtorName &&
!isClassName && !HasTrailingDot;
// Determine where we will perform name lookup.
DeclContext *LookupCtx = nullptr;
if (ObjectTypePtr) {
QualType ObjectType = ObjectTypePtr.get();
if (ObjectType->isRecordType())
LookupCtx = computeDeclContext(ObjectType);
} else if (SS && SS->isNotEmpty()) {
LookupCtx = computeDeclContext(*SS, false);
if (!LookupCtx) {
if (isDependentScopeSpecifier(*SS)) {
// C++ [temp.res]p3:
// A qualified-id that refers to a type and in which the
// nested-name-specifier depends on a template-parameter (14.6.2)
// shall be prefixed by the keyword typename to indicate that the
// qualified-id denotes a type, forming an
// elaborated-type-specifier (7.1.5.3).
//
// We therefore do not perform any name lookup if the result would
// refer to a member of an unknown specialization.
if (!isClassName && !IsCtorOrDtorName)
return nullptr;
// We know from the grammar that this name refers to a type,
// so build a dependent node to describe the type.
if (WantNontrivialTypeSourceInfo)
return ActOnTypenameType(S, SourceLocation(), *SS, II, NameLoc).get();
NestedNameSpecifierLoc QualifierLoc = SS->getWithLocInContext(Context);
QualType T = CheckTypenameType(ETK_None, SourceLocation(), QualifierLoc,
II, NameLoc);
return ParsedType::make(T);
}
return nullptr;
}
if (!LookupCtx->isDependentContext() &&
RequireCompleteDeclContext(*SS, LookupCtx))
return nullptr;
}
// FIXME: LookupNestedNameSpecifierName isn't the right kind of
// lookup for class-names.
LookupNameKind Kind = isClassName ? LookupNestedNameSpecifierName :
LookupOrdinaryName;
LookupResult Result(*this, &II, NameLoc, Kind);
if (LookupCtx) {
// Perform "qualified" name lookup into the declaration context we
// computed, which is either the type of the base of a member access
// expression or the declaration context associated with a prior
// nested-name-specifier.
LookupQualifiedName(Result, LookupCtx);
if (ObjectTypePtr && Result.empty()) {
// C++ [basic.lookup.classref]p3:
// If the unqualified-id is ~type-name, the type-name is looked up
// in the context of the entire postfix-expression. If the type T of
// the object expression is of a class type C, the type-name is also
// looked up in the scope of class C. At least one of the lookups shall
// find a name that refers to (possibly cv-qualified) T.
LookupName(Result, S);
}
} else {
// Perform unqualified name lookup.
LookupName(Result, S);
// For unqualified lookup in a class template in MSVC mode, look into
// dependent base classes where the primary class template is known.
if (Result.empty() && getLangOpts().MSVCCompat && (!SS || SS->isEmpty())) {
if (ParsedType TypeInBase =
recoverFromTypeInKnownDependentBase(*this, II, NameLoc))
return TypeInBase;
}
}
NamedDecl *IIDecl = nullptr;
switch (Result.getResultKind()) {
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
if (CorrectedII) {
TypoCorrection Correction =
CorrectTypo(Result.getLookupNameInfo(), Kind, S, SS,
llvm::make_unique<TypeNameValidatorCCC>(
true, isClassName, AllowDeducedTemplate),
CTK_ErrorRecovery);
IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
TemplateTy Template;
bool MemberOfUnknownSpecialization;
UnqualifiedId TemplateName;
TemplateName.setIdentifier(NewII, NameLoc);
NestedNameSpecifier *NNS = Correction.getCorrectionSpecifier();
CXXScopeSpec NewSS, *NewSSPtr = SS;
if (SS && NNS) {
NewSS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
NewSSPtr = &NewSS;
}
if (Correction && (NNS || NewII != &II) &&
// Ignore a correction to a template type as the to-be-corrected
// identifier is not a template (typo correction for template names
// is handled elsewhere).
!(getLangOpts().CPlusPlus && NewSSPtr &&
isTemplateName(S, *NewSSPtr, false, TemplateName, nullptr, false,
Template, MemberOfUnknownSpecialization))) {
ParsedType Ty = getTypeName(*NewII, NameLoc, S, NewSSPtr,
isClassName, HasTrailingDot, ObjectTypePtr,
IsCtorOrDtorName,
WantNontrivialTypeSourceInfo,
IsClassTemplateDeductionContext);
if (Ty) {
diagnoseTypo(Correction,
PDiag(diag::err_unknown_type_or_class_name_suggest)
<< Result.getLookupName() << isClassName);
if (SS && NNS)
SS->MakeTrivial(Context, NNS, SourceRange(NameLoc));
*CorrectedII = NewII;
return Ty;
}
}
}
// If typo correction failed or was not performed, fall through
LLVM_FALLTHROUGH;
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
Result.suppressDiagnostics();
return nullptr;
case LookupResult::Ambiguous:
// Recover from type-hiding ambiguities by hiding the type. We'll
// do the lookup again when looking for an object, and we can
// diagnose the error then. If we don't do this, then the error
// about hiding the type will be immediately followed by an error
// that only makes sense if the identifier was treated like a type.
if (Result.getAmbiguityKind() == LookupResult::AmbiguousTagHiding) {
Result.suppressDiagnostics();
return nullptr;
}
// Look to see if we have a type anywhere in the list of results.
for (LookupResult::iterator Res = Result.begin(), ResEnd = Result.end();
Res != ResEnd; ++Res) {
if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res) ||
(AllowDeducedTemplate && getAsTypeTemplateDecl(*Res))) {
if (!IIDecl ||
(*Res)->getLocation().getRawEncoding() <
IIDecl->getLocation().getRawEncoding())
IIDecl = *Res;
}
}
if (!IIDecl) {
// None of the entities we found is a type, so there is no way
// to even assume that the result is a type. In this case, don't
// complain about the ambiguity. The parser will either try to
// perform this lookup again (e.g., as an object name), which
// will produce the ambiguity, or will complain that it expected
// a type name.
Result.suppressDiagnostics();
return nullptr;
}
// We found a type within the ambiguous lookup; diagnose the
// ambiguity and then return that type. This might be the right
// answer, or it might not be, but it suppresses any attempt to
// perform the name lookup again.
break;
case LookupResult::Found:
IIDecl = Result.getFoundDecl();
break;
}
assert(IIDecl && "Didn't find decl");
QualType T;
if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) {
// C++ [class.qual]p2: A lookup that would find the injected-class-name
// instead names the constructors of the class, except when naming a class.
// This is ill-formed when we're not actually forming a ctor or dtor name.
auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(LookupCtx);
auto *FoundRD = dyn_cast<CXXRecordDecl>(TD);
if (!isClassName && !IsCtorOrDtorName && LookupRD && FoundRD &&
FoundRD->isInjectedClassName() &&
declaresSameEntity(LookupRD, cast<Decl>(FoundRD->getParent())))
Diag(NameLoc, diag::err_out_of_line_qualified_id_type_names_constructor)
<< &II << /*Type*/1;
DiagnoseUseOfDecl(IIDecl, NameLoc);
T = Context.getTypeDeclType(TD);
MarkAnyDeclReferenced(TD->getLocation(), TD, /*OdrUse=*/false);
} else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
(void)DiagnoseUseOfDecl(IDecl, NameLoc);
if (!HasTrailingDot)
T = Context.getObjCInterfaceType(IDecl);
} else if (AllowDeducedTemplate) {
if (auto *TD = getAsTypeTemplateDecl(IIDecl))
T = Context.getDeducedTemplateSpecializationType(TemplateName(TD),
QualType(), false);
}
if (T.isNull()) {
// If it's not plausibly a type, suppress diagnostics.
Result.suppressDiagnostics();
return nullptr;
}
// NOTE: avoid constructing an ElaboratedType(Loc) if this is a
// constructor or destructor name (in such a case, the scope specifier
// will be attached to the enclosing Expr or Decl node).
if (SS && SS->isNotEmpty() && !IsCtorOrDtorName &&
!isa<ObjCInterfaceDecl>(IIDecl)) {
if (WantNontrivialTypeSourceInfo) {
// Construct a type with type-source information.
TypeLocBuilder Builder;
Builder.pushTypeSpec(T).setNameLoc(NameLoc);
T = getElaboratedType(ETK_None, *SS, T);
ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS->getWithLocInContext(Context));
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
} else {
T = getElaboratedType(ETK_None, *SS, T);
}
}
return ParsedType::make(T);
}
// Builds a fake NNS for the given decl context.
static NestedNameSpecifier *
synthesizeCurrentNestedNameSpecifier(ASTContext &Context, DeclContext *DC) {
for (;; DC = DC->getLookupParent()) {
DC = DC->getPrimaryContext();
auto *ND = dyn_cast<NamespaceDecl>(DC);
if (ND && !ND->isInline() && !ND->isAnonymousNamespace())
return NestedNameSpecifier::Create(Context, nullptr, ND);
else if (auto *RD = dyn_cast<CXXRecordDecl>(DC))
return NestedNameSpecifier::Create(Context, nullptr, RD->isTemplateDecl(),
RD->getTypeForDecl());
else if (isa<TranslationUnitDecl>(DC))
return NestedNameSpecifier::GlobalSpecifier(Context);
}
llvm_unreachable("something isn't in TU scope?");
}
/// Find the parent class with dependent bases of the innermost enclosing method
/// context. Do not look for enclosing CXXRecordDecls directly, or we will end
/// up allowing unqualified dependent type names at class-level, which MSVC
/// correctly rejects.
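///
/// For illustration, a sketch (with invented names) of the pattern this
/// enables in MSVC mode:
/// @code
/// template <typename T> struct Base { typedef int UnknownType; };
/// template <typename T> struct Derived : Base<T> {
///   void f() { UnknownType x; } // resolved in Base<T> at instantiation time
/// };
/// @endcode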
static const CXXRecordDecl *
findRecordWithDependentBasesOfEnclosingMethod(const DeclContext *DC) {
for (; DC && DC->isDependentContext(); DC = DC->getLookupParent()) {
DC = DC->getPrimaryContext();
if (const auto *MD = dyn_cast<CXXMethodDecl>(DC))
if (MD->getParent()->hasAnyDependentBases())
return MD->getParent();
}
return nullptr;
}
ParsedType Sema::ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg) {
assert(getLangOpts().MSVCCompat && "shouldn't be called in non-MSVC mode");
NestedNameSpecifier *NNS = nullptr;
if (IsTemplateTypeArg && getCurScope()->isTemplateParamScope()) {
// If we weren't able to parse a default template argument, delay lookup
// until instantiation time by making a non-dependent DependentTypeName. We
// pretend we saw a NestedNameSpecifier referring to the current scope, and
// lookup is retried.
// FIXME: This hurts our diagnostic quality, since we get errors like "no
// type named 'Foo' in 'current_namespace'" when the user didn't write any
// name specifiers.
NNS = synthesizeCurrentNestedNameSpecifier(Context, CurContext);
Diag(NameLoc, diag::ext_ms_delayed_template_argument) << &II;
} else if (const CXXRecordDecl *RD =
findRecordWithDependentBasesOfEnclosingMethod(CurContext)) {
// Build a DependentNameType that will perform lookup into RD at
// instantiation time.
NNS = NestedNameSpecifier::Create(Context, nullptr, RD->isTemplateDecl(),
RD->getTypeForDecl());
// Diagnose that this identifier was undeclared, and retry the lookup during
// template instantiation.
Diag(NameLoc, diag::ext_undeclared_unqual_id_with_dependent_base) << &II
<< RD;
} else {
// This is not a situation that we should recover from.
return ParsedType();
}
QualType T = Context.getDependentNameType(ETK_None, NNS, &II);
// Build type location information. We synthesized the qualifier, so we have
// to build a fake NestedNameSpecifierLoc.
NestedNameSpecifierLocBuilder NNSLocBuilder;
NNSLocBuilder.MakeTrivial(Context, NNS, SourceRange(NameLoc));
NestedNameSpecifierLoc QualifierLoc = NNSLocBuilder.getWithLocInContext(Context);
TypeLocBuilder Builder;
DependentNameTypeLoc DepTL = Builder.push<DependentNameTypeLoc>(T);
DepTL.setNameLoc(NameLoc);
DepTL.setElaboratedKeywordLoc(SourceLocation());
DepTL.setQualifierLoc(QualifierLoc);
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
/// isTagName() - This method is called *for error recovery purposes only*
/// to determine if the specified name is a valid tag name ("struct foo"). If
/// so, this returns the TST for the tag corresponding to it (TST_enum,
/// TST_union, TST_struct, TST_interface, TST_class). This is used to diagnose
/// cases in C where the user forgot to specify the tag.
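///
/// For example (a sketch):
/// @code
/// struct foo { int x; };
/// foo *p; // error in C; recovered here as if 'struct foo' were written
/// @endcode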
DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
// Do a tag name lookup in this scope.
LookupResult R(*this, &II, SourceLocation(), LookupTagName);
LookupName(R, S, false);
R.suppressDiagnostics();
if (R.getResultKind() == LookupResult::Found)
if (const TagDecl *TD = R.getAsSingle<TagDecl>()) {
switch (TD->getTagKind()) {
case TTK_Struct: return DeclSpec::TST_struct;
case TTK_Interface: return DeclSpec::TST_interface;
case TTK_Union: return DeclSpec::TST_union;
case TTK_Class: return DeclSpec::TST_class;
case TTK_Enum: return DeclSpec::TST_enum;
}
}
return DeclSpec::TST_unspecified;
}
/// isMicrosoftMissingTypename - In Microsoft mode, within class scope,
/// if a CXXScopeSpec's type is equal to the type of one of the base classes
/// then downgrade the missing typename error to a warning.
/// This is needed for MSVC compatibility; Example:
/// @code
/// template<class T> class A {
/// public:
/// typedef int TYPE;
/// };
/// template<class T> class B : public A<T> {
/// public:
/// A<T>::TYPE a; // no typename required because A<T> is a base class.
/// };
/// @endcode
bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
if (CurContext->isRecord()) {
if (SS->getScopeRep()->getKind() == NestedNameSpecifier::Super)
return true;
const Type *Ty = SS->getScopeRep()->getAsType();
CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
for (const auto &Base : RD->bases())
if (Ty && Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
return true;
return S->isFunctionPrototypeScope();
}
return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
}
void Sema::DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName) {
// Don't report typename errors for editor placeholders.
if (II->isEditorPlaceholder())
return;
// We don't have anything to suggest (yet).
SuggestedType = nullptr;
// There may have been a typo in the name of the type. Look up typo
// results, in case we have something that we can suggest.
if (TypoCorrection Corrected =
CorrectTypo(DeclarationNameInfo(II, IILoc), LookupOrdinaryName, S, SS,
llvm::make_unique<TypeNameValidatorCCC>(
false, false, IsTemplateName, !IsTemplateName),
CTK_ErrorRecovery)) {
// FIXME: Support error recovery for the template-name case.
bool CanRecover = !IsTemplateName;
if (Corrected.isKeyword()) {
// We corrected to a keyword.
diagnoseTypo(Corrected,
PDiag(IsTemplateName ? diag::err_no_template_suggest
: diag::err_unknown_typename_suggest)
<< II);
II = Corrected.getCorrectionAsIdentifierInfo();
} else {
// We found a similarly-named type or interface; suggest that.
if (!SS || !SS->isSet()) {
diagnoseTypo(Corrected,
PDiag(IsTemplateName ? diag::err_no_template_suggest
: diag::err_unknown_typename_suggest)
<< II, CanRecover);
} else if (DeclContext *DC = computeDeclContext(*SS, false)) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
II->getName().equals(CorrectedStr);
diagnoseTypo(Corrected,
PDiag(IsTemplateName
? diag::err_no_member_template_suggest
: diag::err_unknown_nested_typename_suggest)
<< II << DC << DroppedSpecifier << SS->getRange(),
CanRecover);
} else {
llvm_unreachable("could not have corrected a typo here");
}
if (!CanRecover)
return;
CXXScopeSpec tmpSS;
if (Corrected.getCorrectionSpecifier())
tmpSS.MakeTrivial(Context, Corrected.getCorrectionSpecifier(),
SourceRange(IILoc));
// FIXME: Support class template argument deduction here.
SuggestedType =
getTypeName(*Corrected.getCorrectionAsIdentifierInfo(), IILoc, S,
tmpSS.isSet() ? &tmpSS : SS, false, false, nullptr,
/*IsCtorOrDtorName=*/false,
/*NonTrivialTypeSourceInfo=*/true);
}
return;
}
if (getLangOpts().CPlusPlus && !IsTemplateName) {
// See if II is a class template that the user forgot to pass arguments to.
UnqualifiedId Name;
Name.setIdentifier(II, IILoc);
CXXScopeSpec EmptySS;
TemplateTy TemplateResult;
bool MemberOfUnknownSpecialization;
if (isTemplateName(S, SS ? *SS : EmptySS, /*hasTemplateKeyword=*/false,
Name, nullptr, true, TemplateResult,
MemberOfUnknownSpecialization) == TNK_Type_template) {
diagnoseMissingTemplateArguments(TemplateResult.get(), IILoc);
return;
}
}
// FIXME: Should we move the logic that tries to recover from a missing tag
// (struct, union, enum) from Parser::ParseImplicitInt here, instead?
if (!SS || (!SS->isSet() && !SS->isInvalid()))
Diag(IILoc, IsTemplateName ? diag::err_no_template
: diag::err_unknown_typename)
<< II;
else if (DeclContext *DC = computeDeclContext(*SS, false))
Diag(IILoc, IsTemplateName ? diag::err_no_member_template
: diag::err_typename_nested_not_found)
<< II << DC << SS->getRange();
else if (isDependentScopeSpecifier(*SS)) {
unsigned DiagID = diag::err_typename_missing;
if (getLangOpts().MSVCCompat && isMicrosoftMissingTypename(SS, S))
DiagID = diag::ext_typename_missing;
Diag(SS->getRange().getBegin(), DiagID)
<< SS->getScopeRep() << II->getName()
<< SourceRange(SS->getRange().getBegin(), IILoc)
<< FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
SuggestedType = ActOnTypenameType(S, SourceLocation(),
*SS, *II, IILoc).get();
} else {
assert(SS && SS->isInvalid() &&
"Invalid scope specifier has already been diagnosed");
}
}
/// Determine whether the given result set contains either a type name or a
/// template name that could begin a template-id (i.e., when the next token
/// is '<' in C++).
static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) {
bool CheckTemplate = R.getSema().getLangOpts().CPlusPlus &&
NextToken.is(tok::less);
for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) {
if (isa<TypeDecl>(*I) || isa<ObjCInterfaceDecl>(*I))
return true;
if (CheckTemplate && isa<TemplateDecl>(*I))
return true;
}
return false;
}
static bool isTagTypeWithMissingTag(Sema &SemaRef, LookupResult &Result,
Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name,
SourceLocation NameLoc) {
LookupResult R(SemaRef, Name, NameLoc, Sema::LookupTagName);
SemaRef.LookupParsedName(R, S, &SS);
if (TagDecl *Tag = R.getAsSingle<TagDecl>()) {
StringRef FixItTagName;
switch (Tag->getTagKind()) {
case TTK_Class:
FixItTagName = "class ";
break;
case TTK_Enum:
FixItTagName = "enum ";
break;
case TTK_Struct:
FixItTagName = "struct ";
break;
case TTK_Interface:
FixItTagName = "__interface ";
break;
case TTK_Union:
FixItTagName = "union ";
break;
}
StringRef TagName = FixItTagName.drop_back();
SemaRef.Diag(NameLoc, diag::err_use_of_tag_name_without_tag)
<< Name << TagName << SemaRef.getLangOpts().CPlusPlus
<< FixItHint::CreateInsertion(NameLoc, FixItTagName);
for (LookupResult::iterator I = Result.begin(), IEnd = Result.end();
I != IEnd; ++I)
SemaRef.Diag((*I)->getLocation(), diag::note_decl_hiding_tag_type)
<< Name << TagName;
// Replace lookup results with just the tag decl.
Result.clear(Sema::LookupTagName);
SemaRef.LookupParsedName(Result, S, &SS);
return true;
}
return false;
}
/// Build a ParsedType for a simple-type-specifier with a nested-name-specifier.
static ParsedType buildNestedType(Sema &S, CXXScopeSpec &SS,
QualType T, SourceLocation NameLoc) {
ASTContext &Context = S.Context;
TypeLocBuilder Builder;
Builder.pushTypeSpec(T).setNameLoc(NameLoc);
T = S.getElaboratedType(ETK_None, SS, T);
ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
return S.CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
Sema::NameClassification
Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
SourceLocation NameLoc, const Token &NextToken,
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC) {
DeclarationNameInfo NameInfo(Name, NameLoc);
ObjCMethodDecl *CurMethod = getCurMethodDecl();
if (NextToken.is(tok::coloncolon)) {
NestedNameSpecInfo IdInfo(Name, NameLoc, NextToken.getLocation());
BuildCXXNestedNameSpecifier(S, IdInfo, false, SS, nullptr, false);
} else if (getLangOpts().CPlusPlus && SS.isSet() &&
isCurrentClassName(*Name, S, &SS)) {
// Per [class.qual]p2, this names the constructors of SS, not the
// injected-class-name. We don't have a classification for that.
// There's not much point caching this result, since the parser
// will reject it later.
return NameClassification::Unknown();
}
LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
LookupParsedName(Result, S, &SS, !CurMethod);
// For unqualified lookup in a class template in MSVC mode, look into
// dependent base classes where the primary class template is known.
if (Result.empty() && SS.isEmpty() && getLangOpts().MSVCCompat) {
if (ParsedType TypeInBase =
recoverFromTypeInKnownDependentBase(*this, *Name, NameLoc))
return TypeInBase;
}
// Perform lookup for Objective-C instance variables (including automatically
// synthesized instance variables), if we're in an Objective-C method.
// FIXME: This lookup really, really needs to be folded in to the normal
// unqualified lookup mechanism.
if (!SS.isSet() && CurMethod && !isResultTypeOrTemplate(Result, NextToken)) {
ExprResult E = LookupInObjCMethod(Result, S, Name, true);
if (E.get() || E.isInvalid())
return E;
}
bool SecondTry = false;
bool IsFilteredTemplateName = false;
Corrected:
switch (Result.getResultKind()) {
case LookupResult::NotFound:
// If an unqualified-id is followed by a '(', then we have a function
// call.
if (!SS.isSet() && NextToken.is(tok::l_paren)) {
// In C++, this is an ADL-only call.
// FIXME: Reference?
if (getLangOpts().CPlusPlus)
return BuildDeclarationNameExpr(SS, Result, /*ADL=*/true);
// C90 6.3.2.2:
// If the expression that precedes the parenthesized argument list in a
// function call consists solely of an identifier, and if no
// declaration is visible for this identifier, the identifier is
// implicitly declared exactly as if, in the innermost block containing
// the function call, the declaration
//
// extern int identifier ();
//
// appeared.
//
// We also allow this in C99 as an extension.
if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S)) {
Result.addDecl(D);
Result.resolveKind();
return BuildDeclarationNameExpr(SS, Result, /*ADL=*/false);
}
}
// In C, we first see whether there is a tag type by the same name, in
// which case it's likely that the user just forgot to write "enum",
// "struct", or "union".
if (!getLangOpts().CPlusPlus && !SecondTry &&
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
break;
}
// Perform typo correction to determine if there is another name that is
// close to this name.
if (!SecondTry && CCC) {
SecondTry = true;
if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
Result.getLookupKind(), S,
&SS, std::move(CCC),
CTK_ErrorRecovery)) {
unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
unsigned QualifiedDiag = diag::err_no_member_suggest;
NamedDecl *FirstDecl = Corrected.getFoundDecl();
NamedDecl *UnderlyingFirstDecl = Corrected.getCorrectionDecl();
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
UnderlyingFirstDecl && isa<TemplateDecl>(UnderlyingFirstDecl)) {
UnqualifiedDiag = diag::err_no_template_suggest;
QualifiedDiag = diag::err_no_member_template_suggest;
} else if (UnderlyingFirstDecl &&
(isa<TypeDecl>(UnderlyingFirstDecl) ||
isa<ObjCInterfaceDecl>(UnderlyingFirstDecl) ||
isa<ObjCCompatibleAliasDecl>(UnderlyingFirstDecl))) {
UnqualifiedDiag = diag::err_unknown_typename_suggest;
QualifiedDiag = diag::err_unknown_nested_typename_suggest;
}
if (SS.isEmpty()) {
diagnoseTypo(Corrected, PDiag(UnqualifiedDiag) << Name);
} else { // FIXME: is this even reachable? Test it.
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
Name->getName().equals(CorrectedStr);
diagnoseTypo(Corrected, PDiag(QualifiedDiag)
<< Name << computeDeclContext(SS, false)
<< DroppedSpecifier << SS.getRange());
}
// Update the name, so that the caller has the new name.
Name = Corrected.getCorrectionAsIdentifierInfo();
// Typo correction corrected to a keyword.
if (Corrected.isKeyword())
return Name;
// Also update the LookupResult...
// FIXME: This should probably go away at some point
Result.clear();
Result.setLookupName(Corrected.getCorrection());
if (FirstDecl)
Result.addDecl(FirstDecl);
// If we found an Objective-C instance variable, let
// LookupInObjCMethod build the appropriate expression to
// reference the ivar.
// FIXME: This is a gross hack.
if (ObjCIvarDecl *Ivar = Result.getAsSingle<ObjCIvarDecl>()) {
Result.clear();
ExprResult E(LookupInObjCMethod(Result, S, Ivar->getIdentifier()));
return E;
}
goto Corrected;
}
}
// We failed to correct; just fall through and let the parser deal with it.
Result.suppressDiagnostics();
return NameClassification::Unknown();
case LookupResult::NotFoundInCurrentInstantiation: {
// We performed name lookup into the current instantiation, and there were
// dependent bases, so we treat this result the same way as any other
// dependent nested-name-specifier.
// C++ [temp.res]p2:
// A name used in a template declaration or definition and that is
// dependent on a template-parameter is assumed not to name a type
// unless the applicable name lookup finds a type name or the name is
// qualified by the keyword typename.
//
// FIXME: If the next token is '<', we might want to ask the parser to
// perform some heroics to see if we actually have a
// template-argument-list, which would indicate a missing 'template'
// keyword here.
return ActOnDependentIdExpression(SS, /*TemplateKWLoc=*/SourceLocation(),
NameInfo, IsAddressOfOperand,
/*TemplateArgs=*/nullptr);
}
case LookupResult::Found:
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
break;
case LookupResult::Ambiguous:
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
hasAnyAcceptableTemplateNames(Result)) {
// C++ [temp.local]p3:
// A lookup that finds an injected-class-name (10.2) can result in an
// ambiguity in certain cases (for example, if it is found in more than
// one base class). If all of the injected-class-names that are found
// refer to specializations of the same class template, and if the name
// is followed by a template-argument-list, the reference refers to the
// class template itself and not a specialization thereof, and is not
// ambiguous.
//
// This filtering can make an ambiguous result into an unambiguous one,
// so try again after filtering out template names.
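// For example, given 'template<class T> struct A {};' and a class derived
// from both A<int> and A<char>, the name 'A' followed by '<' inside the
// derived class refers to the class template A itself rather than either
// injected-class-name, so it is not ambiguous.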
FilterAcceptableTemplateNames(Result);
if (!Result.isAmbiguous()) {
IsFilteredTemplateName = true;
break;
}
}
// Diagnose the ambiguity and return an error.
return NameClassification::Error();
}
if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
(IsFilteredTemplateName || hasAnyAcceptableTemplateNames(Result))) {
// C++ [temp.names]p3:
// After name lookup (3.4) finds that a name is a template-name or that
// an operator-function-id or a literal-operator-id refers to a set of
// overloaded functions any member of which is a function template if
// this is followed by a <, the < is always taken as the delimiter of a
// template-argument-list and never as the less-than operator.
if (!IsFilteredTemplateName)
FilterAcceptableTemplateNames(Result);
if (!Result.empty()) {
bool IsFunctionTemplate;
bool IsVarTemplate;
TemplateName Template;
if (Result.end() - Result.begin() > 1) {
IsFunctionTemplate = true;
Template = Context.getOverloadedTemplateName(Result.begin(),
Result.end());
} else {
TemplateDecl *TD
= cast<TemplateDecl>((*Result.begin())->getUnderlyingDecl());
IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
IsVarTemplate = isa<VarTemplateDecl>(TD);
if (SS.isSet() && !SS.isInvalid())
Template = Context.getQualifiedTemplateName(SS.getScopeRep(),
/*TemplateKeyword=*/false,
TD);
else
Template = TemplateName(TD);
}
if (IsFunctionTemplate) {
// Function templates always go through overload resolution, at which
// point we'll perform the various checks (e.g., accessibility) we need
// to based on which function we selected.
Result.suppressDiagnostics();
return NameClassification::FunctionTemplate(Template);
}
return IsVarTemplate ? NameClassification::VarTemplate(Template)
: NameClassification::TypeTemplate(Template);
}
}
NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
DiagnoseUseOfDecl(Type, NameLoc);
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
QualType T = Context.getTypeDeclType(Type);
if (SS.isNotEmpty())
return buildNestedType(*this, SS, T, NameLoc);
return ParsedType::make(T);
}
ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
if (!Class) {
// FIXME: It's unfortunate that we don't have a Type node for handling this.
if (ObjCCompatibleAliasDecl *Alias =
dyn_cast<ObjCCompatibleAliasDecl>(FirstDecl))
Class = Alias->getClassInterface();
}
if (Class) {
DiagnoseUseOfDecl(Class, NameLoc);
if (NextToken.is(tok::period)) {
// Interface. <something> is parsed as a property reference expression.
// Just return "unknown" as a fall-through for now.
Result.suppressDiagnostics();
return NameClassification::Unknown();
}
QualType T = Context.getObjCInterfaceType(Class);
return ParsedType::make(T);
}
// We can have a type template here if we're classifying a template argument.
if (isa<TemplateDecl>(FirstDecl) && !isa<FunctionTemplateDecl>(FirstDecl) &&
!isa<VarTemplateDecl>(FirstDecl))
return NameClassification::TypeTemplate(
TemplateName(cast<TemplateDecl>(FirstDecl)));
// Check for a tag type hidden by a non-type decl in a few cases where it
// seems likely a type is wanted instead of the non-type that was found.
bool NextIsOp = NextToken.isOneOf(tok::amp, tok::star);
if ((NextToken.is(tok::identifier) ||
(NextIsOp &&
FirstDecl->getUnderlyingDecl()->isFunctionOrFunctionTemplate())) &&
isTagTypeWithMissingTag(*this, Result, S, SS, Name, NameLoc)) {
TypeDecl *Type = Result.getAsSingle<TypeDecl>();
DiagnoseUseOfDecl(Type, NameLoc);
QualType T = Context.getTypeDeclType(Type);
if (SS.isNotEmpty())
return buildNestedType(*this, SS, T, NameLoc);
return ParsedType::make(T);
}
if (FirstDecl->isCXXClassMember())
return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result,
nullptr, S);
bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
return BuildDeclarationNameExpr(SS, Result, ADL);
}
Sema::TemplateNameKindForDiagnostics
Sema::getTemplateNameKindForDiagnostics(TemplateName Name) {
auto *TD = Name.getAsTemplateDecl();
if (!TD)
return TemplateNameKindForDiagnostics::DependentTemplate;
if (isa<ClassTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::ClassTemplate;
if (isa<FunctionTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::FunctionTemplate;
if (isa<VarTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::VarTemplate;
if (isa<TypeAliasTemplateDecl>(TD))
return TemplateNameKindForDiagnostics::AliasTemplate;
if (isa<TemplateTemplateParmDecl>(TD))
return TemplateNameKindForDiagnostics::TemplateTemplateParam;
return TemplateNameKindForDiagnostics::DependentTemplate;
}
// Determines the context to return to after temporarily entering a
// context. This depends in an unnecessarily complicated way on the
// exact ordering of callbacks from the parser.
DeclContext *Sema::getContainingDC(DeclContext *DC) {
// Functions defined inline within classes aren't parsed until we've
// finished parsing the top-level class, so the top-level class is
// the context we'll need to return to.
// A Lambda call operator whose parent is a class must not be treated
// as an inline member function. A Lambda can be used legally
// either as an in-class member initializer or a default argument. These
// are parsed once the class has been marked complete and so the containing
// context would be the nested class (when the lambda is defined in one);
// If the class is not complete, then the lambda is being used in an
// ill-formed fashion (such as to specify the width of a bit-field, or
// in an array-bound) - in which case we still want to return the
// lexically containing DC (which could be a nested class).
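// For example (a sketch): given
//   struct Outer { struct Inner { void f() {} }; };
// f's body is parsed only once Outer is complete, so the context to return
// to after f is Outer, not Inner.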
if (isa<FunctionDecl>(DC) && !isLambdaCallOperator(DC)) {
DC = DC->getLexicalParent();
// A function not defined within a class will always return to its
// lexical context.
if (!isa<CXXRecordDecl>(DC))
return DC;
// A C++ inline method/friend is parsed *after* the topmost class
// it was declared in is fully parsed ("complete"); the topmost
// class is the context we need to return to.
while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent()))
DC = RD;
// Return the declaration context of the topmost class the inline method is
// declared in.
return DC;
}
return DC->getLexicalParent();
}
void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
assert(getContainingDC(DC) == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = DC;
S->setEntity(DC);
}
void Sema::PopDeclContext() {
assert(CurContext && "DeclContext imbalance!");
CurContext = getContainingDC(CurContext);
assert(CurContext && "Popped translation unit!");
}
Sema::SkippedDefinitionContext Sema::ActOnTagStartSkippedDefinition(Scope *S,
Decl *D) {
// Unlike PushDeclContext, the context to which we return is not necessarily
// the containing DC of TD, because the new context will be some pre-existing
// TagDecl definition instead of a fresh one.
auto Result = static_cast<SkippedDefinitionContext>(CurContext);
CurContext = cast<TagDecl>(D)->getDefinition();
assert(CurContext && "skipping definition of undefined tag");
// Start lookups from the parent of the current context; we don't want to look
// into the pre-existing complete definition.
S->setEntity(CurContext->getLookupParent());
return Result;
}
void Sema::ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context) {
CurContext = static_cast<decltype(CurContext)>(Context);
}
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
///
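/// For example (a sketch of C++ [basic.lookup.unqual]p14):
/// @code
/// namespace N { extern int size; int buf[10]; }
/// int N::size = sizeof(buf); // 'buf' is looked up in N after the
///                            // declarator-id, finding N::buf
/// @endcode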
void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) {
// C++0x [basic.lookup.unqual]p13:
// A name used in the definition of a static data member of class
// X (after the qualified-id of the static member) is looked up as
// if the name was used in a member function of X.
// C++0x [basic.lookup.unqual]p14:
// If a variable member of a namespace is defined outside of the
// scope of its namespace then any name used in the definition of
// the variable member (after the declarator-id) is looked up as
// if the definition of the variable member occurred in its
// namespace.
// Both of these imply that we should push a scope whose context
// is the semantic context of the declaration. We can't use
// PushDeclContext here because that context is not necessarily
// lexically contained in the current context. Fortunately,
// the containing scope should have the appropriate information.
assert(!S->getEntity() && "scope already has entity");
#ifndef NDEBUG
Scope *Ancestor = S->getParent();
while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
assert(Ancestor->getEntity() == CurContext && "ancestor context mismatch");
#endif
CurContext = DC;
S->setEntity(DC);
}
void Sema::ExitDeclaratorContext(Scope *S) {
assert(S->getEntity() == CurContext && "Context imbalance!");
// Switch back to the lexical context. The safety of this is
// enforced by an assert in EnterDeclaratorContext.
Scope *Ancestor = S->getParent();
while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
CurContext = Ancestor->getEntity();
// We don't need to do anything with the scope, which is going to
// disappear.
}
void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
// We assume that the caller has already called
// ActOnReenterTemplateScope so getTemplatedDecl() works.
FunctionDecl *FD = D->getAsFunction();
if (!FD)
return;
// Same implementation as PushDeclContext, but enters the context
// from the lexical parent, rather than the top-level class.
assert(CurContext == FD->getLexicalParent() &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = FD;
S->setEntity(CurContext);
for (unsigned P = 0, NumParams = FD->getNumParams(); P < NumParams; ++P) {
ParmVarDecl *Param = FD->getParamDecl(P);
// If the parameter has an identifier, then add it to the scope
if (Param->getIdentifier()) {
S->AddDecl(Param);
IdResolver.AddDecl(Param);
}
}
}
void Sema::ActOnExitFunctionContext() {
// Same implementation as PopDeclContext, but returns to the lexical parent,
// rather than the top-level class.
assert(CurContext && "DeclContext imbalance!");
CurContext = CurContext->getLexicalParent();
assert(CurContext && "Popped translation unit!");
}
/// Determine whether we allow overloading of the function
/// PrevDecl with another declaration.
///
/// This routine determines whether overloading is possible, not
/// whether some new function is actually an overload. It will return
/// true in C++ (where we can always provide overloads) or, as an
/// extension, in C when the previous function is already an
/// overloaded function declaration or has the "overloadable"
/// attribute.
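///
/// For example, a sketch of the C extension case (invented names):
/// @code
/// int   my_abs(int x)   __attribute__((overloadable));
/// float my_abs(float x) __attribute__((overloadable)); // OK: an overload
/// @endcode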
static bool AllowOverloadingOfFunction(LookupResult &Previous,
ASTContext &Context,
const FunctionDecl *New) {
if (Context.getLangOpts().CPlusPlus)
return true;
if (Previous.getResultKind() == LookupResult::FoundOverloaded)
return true;
return Previous.getResultKind() == LookupResult::Found &&
(Previous.getFoundDecl()->hasAttr<OverloadableAttr>() ||
New->hasAttr<OverloadableAttr>());
}
/// Add this decl to the scope shadowed decl chains.
void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
// Move up the scope chain until we find the nearest enclosing
// non-transparent context. The declaration will be introduced into this
// scope.
while (S->getEntity() && S->getEntity()->isTransparentContext())
S = S->getParent();
// Add scoped declarations into their context, so that they can be
// found later. Declarations without a context won't be inserted
// into any context.
if (AddToContext)
CurContext->addDecl(D);
// Out-of-line definitions shouldn't be pushed into scope in C++, unless they
// are function-local declarations.
if (getLangOpts().CPlusPlus && D->isOutOfLine() &&
!D->getDeclContext()->getRedeclContext()->Equals(
D->getLexicalDeclContext()->getRedeclContext()) &&
!D->getLexicalDeclContext()->isFunctionOrMethod())
return;
// Template instantiations should also not be pushed into scope.
if (isa<FunctionDecl>(D) &&
cast<FunctionDecl>(D)->isFunctionTemplateSpecialization())
return;
// If this declaration replaces anything in the current scope, remove the
// old declaration from both the scope and the identifier resolver.
IdentifierResolver::iterator I = IdResolver.begin(D->getDeclName()),
IEnd = IdResolver.end();
for (; I != IEnd; ++I) {
if (S->isDeclScope(*I) && D->declarationReplaces(*I)) {
S->RemoveDecl(*I);
IdResolver.RemoveDecl(*I);
// Should only need to replace one decl.
break;
}
}
S->AddDecl(D);
if (isa<LabelDecl>(D) && !cast<LabelDecl>(D)->isGnuLocal()) {
// Implicitly-generated labels may end up getting generated in an order that
// isn't strictly lexical, which breaks name lookup. Be careful to insert
// the label at the appropriate place in the identifier chain.
for (I = IdResolver.begin(D->getDeclName()); I != IEnd; ++I) {
DeclContext *IDC = (*I)->getLexicalDeclContext()->getRedeclContext();
if (IDC == CurContext) {
if (!S->isDeclScope(*I))
continue;
} else if (IDC->Encloses(CurContext))
break;
}
IdResolver.InsertDeclAfter(I, D);
} else {
IdResolver.AddDecl(D);
}
}
void Sema::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
if (IdResolver.tryAddTopLevelDecl(D, Name) && TUScope)
TUScope->AddDecl(D);
}
bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
bool AllowInlineNamespace) {
return IdResolver.isDeclInScope(D, Ctx, S, AllowInlineNamespace);
}
Scope *Sema::getScopeForDeclContext(Scope *S, DeclContext *DC) {
DeclContext *TargetDC = DC->getPrimaryContext();
do {
if (DeclContext *ScopeDC = S->getEntity())
if (ScopeDC->getPrimaryContext() == TargetDC)
return S;
} while ((S = S->getParent()));
return nullptr;
}
static bool isOutOfScopePreviousDeclaration(NamedDecl *,
DeclContext*,
ASTContext&);
/// Filters out lookup results that don't fall within the given scope
/// as determined by isDeclInScope.
void Sema::FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage,
bool AllowInlineNamespace) {
LookupResult::Filter F = R.makeFilter();
while (F.hasNext()) {
NamedDecl *D = F.next();
if (isDeclInScope(D, Ctx, S, AllowInlineNamespace))
continue;
if (ConsiderLinkage && isOutOfScopePreviousDeclaration(D, Ctx, Context))
continue;
F.erase();
}
F.done();
}
/// We've determined that \p New is a redeclaration of \p Old. Check that they
/// have compatible owning modules.
bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
// FIXME: The Modules TS is not clear about how friend declarations are
// to be treated. It's not meaningful to have different owning modules for
// linkage in redeclarations of the same entity, so for now allow the
// redeclaration and change the owning modules to match.
if (New->getFriendObjectKind() &&
Old->getOwningModuleForLinkage() != New->getOwningModuleForLinkage()) {
New->setLocalOwningModule(Old->getOwningModule());
makeMergedDefinitionVisible(New);
return false;
}
Module *NewM = New->getOwningModule();
Module *OldM = Old->getOwningModule();
if (NewM == OldM)
return false;
// FIXME: Check proclaimed-ownership-declarations here too.
bool NewIsModuleInterface = NewM && NewM->Kind == Module::ModuleInterfaceUnit;
bool OldIsModuleInterface = OldM && OldM->Kind == Module::ModuleInterfaceUnit;
if (NewIsModuleInterface || OldIsModuleInterface) {
// C++ Modules TS [basic.def.odr] 6.2/6.7 [sic]:
// if a declaration of D [...] appears in the purview of a module, all
// other such declarations shall appear in the purview of the same module
Diag(New->getLocation(), diag::err_mismatched_owning_module)
<< New
<< NewIsModuleInterface
<< (NewIsModuleInterface ? NewM->getFullModuleName() : "")
<< OldIsModuleInterface
<< (OldIsModuleInterface ? OldM->getFullModuleName() : "");
Diag(Old->getLocation(), diag::note_previous_declaration);
New->setInvalidDecl();
return true;
}
return false;
}
static bool isUsingDecl(NamedDecl *D) {
return isa<UsingShadowDecl>(D) ||
isa<UnresolvedUsingTypenameDecl>(D) ||
isa<UnresolvedUsingValueDecl>(D);
}
/// Removes using shadow declarations from the lookup results.
static void RemoveUsingDecls(LookupResult &R) {
LookupResult::Filter F = R.makeFilter();
while (F.hasNext())
if (isUsingDecl(F.next()))
F.erase();
F.done();
}
/// Check for this common pattern:
/// @code
/// class S {
/// S(const S&); // DO NOT IMPLEMENT
/// void operator=(const S&); // DO NOT IMPLEMENT
/// };
/// @endcode
static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) {
// FIXME: Should check for private access too but access is set after we get
// the decl here.
if (D->doesThisDeclarationHaveABody())
return false;
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
return CD->isCopyConstructor();
return D->isCopyAssignmentOperator();
}
// We need this to handle
//
// typedef struct {
// void *foo() { return 0; }
// } A;
//
// When we see foo we don't know if after the typedef we will get 'A' or '*A'
// for example. If 'A', foo will have external linkage. If we have '*A',
// foo will have no linkage. Since we can't know until we get to the end
// of the typedef, this function finds out if D might have non-external linkage.
// Callers should verify at the end of the TU whether D actually has external
// linkage or not.
bool Sema::mightHaveNonExternalLinkage(const DeclaratorDecl *D) {
const DeclContext *DC = D->getDeclContext();
while (!DC->isTranslationUnit()) {
if (const RecordDecl *RD = dyn_cast<RecordDecl>(DC)){
if (!RD->hasNameForLinkage())
return true;
}
DC = DC->getParent();
}
return !D->isExternallyVisible();
}
// FIXME: This needs to be refactored; some other isInMainFile users want
// these semantics.
static bool isMainFileLoc(const Sema &S, SourceLocation Loc) {
if (S.TUKind != TU_Complete)
return false;
return S.SourceMgr.isInMainFile(Loc);
}
bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
assert(D);
if (D->isInvalidDecl() || D->isUsed() || D->hasAttr<UnusedAttr>())
return false;
// Ignore all entities declared within templates, and out-of-line definitions
// of members of class templates.
if (D->getDeclContext()->isDependentContext() ||
D->getLexicalDeclContext()->isDependentContext())
return false;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return false;
// A non-out-of-line declaration of a member specialization was implicitly
// instantiated; it's the out-of-line declaration that we're interested in.
if (FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
FD->getMemberSpecializationInfo() && !FD->isOutOfLine())
return false;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (MD->isVirtual() || IsDisallowedCopyOrAssign(MD))
return false;
} else {
// 'static inline' functions are defined in headers; don't warn.
if (FD->isInlined() && !isMainFileLoc(*this, FD->getLocation()))
return false;
}
if (FD->doesThisDeclarationHaveABody() &&
Context.DeclMustBeEmitted(FD))
return false;
} else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// Constants and utility variables are defined in headers with internal
// linkage; don't warn. (Unlike functions, there isn't a convenient marker
// like "inline".)
if (!isMainFileLoc(*this, VD->getLocation()))
return false;
if (Context.DeclMustBeEmitted(VD))
return false;
if (VD->isStaticDataMember() &&
VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
return false;
if (VD->isStaticDataMember() &&
VD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
VD->getMemberSpecializationInfo() && !VD->isOutOfLine())
return false;
if (VD->isInline() && !isMainFileLoc(*this, VD->getLocation()))
return false;
} else {
return false;
}
// Only warn for unused decls internal to the translation unit.
// FIXME: This seems like a bogus check; it suppresses -Wunused-function
// for inline functions defined in the main source file, for instance.
return mightHaveNonExternalLinkage(D);
}
void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
if (!D)
return;
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
const FunctionDecl *First = FD->getFirstDecl();
if (FD != First && ShouldWarnIfUnusedFileScopedDecl(First))
return; // First should already be in the vector.
}
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
const VarDecl *First = VD->getFirstDecl();
if (VD != First && ShouldWarnIfUnusedFileScopedDecl(First))
return; // First should already be in the vector.
}
if (ShouldWarnIfUnusedFileScopedDecl(D))
UnusedFileScopedDecls.push_back(D);
}
static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (D->isInvalidDecl())
return false;
bool Referenced = false;
if (auto *DD = dyn_cast<DecompositionDecl>(D)) {
// For a decomposition declaration, warn if none of the bindings are
// referenced, instead of if the variable itself is referenced (which
// it is, by the bindings' expressions).
for (auto *BD : DD->bindings()) {
if (BD->isReferenced()) {
Referenced = true;
break;
}
}
} else if (!D->getDeclName()) {
return false;
} else if (D->isReferenced() || D->isUsed()) {
Referenced = true;
}
if (Referenced || D->hasAttr<UnusedAttr>() ||
D->hasAttr<ObjCPreciseLifetimeAttr>())
return false;
if (isa<LabelDecl>(D))
return true;
// Except for labels, we only care about unused decls that are local to
// functions.
bool WithinFunction = D->getDeclContext()->isFunctionOrMethod();
if (const auto *R = dyn_cast<CXXRecordDecl>(D->getDeclContext()))
// For dependent types, the diagnostic is deferred.
WithinFunction =
WithinFunction || (R->isLocalClass() && !R->isDependentType());
if (!WithinFunction)
return false;
if (isa<TypedefNameDecl>(D))
return true;
// White-list anything that isn't a local variable.
if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D))
return false;
// Types of valid local variables should be complete, so this should succeed.
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// White-list anything with an __attribute__((unused)) type.
const auto *Ty = VD->getType().getTypePtr();
// Only look at the outermost level of typedef.
if (const TypedefType *TT = Ty->getAs<TypedefType>()) {
if (TT->getDecl()->hasAttr<UnusedAttr>())
return false;
}
// If we failed to complete the type for some reason, or if the type is
// dependent, don't diagnose the variable.
if (Ty->isIncompleteType() || Ty->isDependentType())
return false;
// Look at the element type to ensure that the warning behaviour is
// consistent for both scalars and arrays.
Ty = Ty->getBaseElementTypeUnsafe();
if (const TagType *TT = Ty->getAs<TagType>()) {
const TagDecl *Tag = TT->getDecl();
if (Tag->hasAttr<UnusedAttr>())
return false;
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
if (!RD->hasTrivialDestructor() && !RD->hasAttr<WarnUnusedAttr>())
return false;
if (const Expr *Init = VD->getInit()) {
if (const ExprWithCleanups *Cleanups =
dyn_cast<ExprWithCleanups>(Init))
Init = Cleanups->getSubExpr();
const CXXConstructExpr *Construct =
dyn_cast<CXXConstructExpr>(Init);
if (Construct && !Construct->isElidable()) {
CXXConstructorDecl *CD = Construct->getConstructor();
if (!CD->isTrivial() && !RD->hasAttr<WarnUnusedAttr>() &&
(VD->getInit()->isValueDependent() || !VD->evaluateValue()))
return false;
}
}
}
}
// TODO: __attribute__((unused)) templates?
}
return true;
}
static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
FixItHint &Hint) {
if (isa<LabelDecl>(D)) {
SourceLocation AfterColon = Lexer::findLocationAfterToken(D->getLocEnd(),
tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(), true);
if (AfterColon.isInvalid())
return;
Hint = FixItHint::CreateRemoval(CharSourceRange::
getCharRange(D->getLocStart(), AfterColon));
}
}
void Sema::DiagnoseUnusedNestedTypedefs(const RecordDecl *D) {
if (D->getTypeForDecl()->isDependentType())
return;
for (auto *TmpD : D->decls()) {
if (const auto *T = dyn_cast<TypedefNameDecl>(TmpD))
DiagnoseUnusedDecl(T);
else if (const auto *R = dyn_cast<RecordDecl>(TmpD))
DiagnoseUnusedNestedTypedefs(R);
}
}
/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
/// unless they are marked attr(unused).
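///
/// For example (a sketch):
/// @code
/// void f() {
///   int n = 0; // warning: unused variable 'n'
/// }
/// @endcode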
void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
if (!ShouldDiagnoseUnusedDecl(D))
return;
if (auto *TD = dyn_cast<TypedefNameDecl>(D)) {
// typedefs can be referenced later on, so the diagnostics are emitted
// at end-of-translation-unit.
UnusedLocalTypedefNameCandidates.insert(TD);
return;
}
FixItHint Hint;
GenerateFixForUnusedDecl(D, Context, Hint);
unsigned DiagID;
if (isa<VarDecl>(D) && cast<VarDecl>(D)->isExceptionVariable())
DiagID = diag::warn_unused_exception_param;
else if (isa<LabelDecl>(D))
DiagID = diag::warn_unused_label;
else
DiagID = diag::warn_unused_variable;
Diag(D->getLocation(), DiagID) << D << Hint;
}
static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
// Verify that we have no forward references left. If so, there was a goto
// or address-of-label taken, but no definition of the label itself. Forward
// references are indicated by a null sub-statement (and, for MS inline
// assembly labels, by the label being unresolved).
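// For example, "void f() { goto missing; }" leaves 'missing' with a null
// statement, which is diagnosed below as use of an undeclared label.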
bool Diagnose = false;
if (L->isMSAsmLabel())
Diagnose = !L->isResolvedMSAsmLabel();
else
Diagnose = L->getStmt() == nullptr;
if (Diagnose)
S.Diag(L->getLocation(), diag::err_undeclared_label_use) << L->getDeclName();
}
void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
S->mergeNRVOIntoParent();
if (S->decl_empty()) return;
assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
"Scope shouldn't contain decls!");
for (auto *TmpD : S->decls()) {
assert(TmpD && "This decl didn't get pushed??");
assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?");
NamedDecl *D = cast<NamedDecl>(TmpD);
// Diagnose unused variables in this scope.
if (!S->hasUnrecoverableErrorOccurred()) {
DiagnoseUnusedDecl(D);
if (const auto *RD = dyn_cast<RecordDecl>(D))
DiagnoseUnusedNestedTypedefs(RD);
}
if (!D->getDeclName()) continue;
// If this was a forward reference to a label, verify it was defined.
if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
CheckPoppedLabel(LD, *this);
// Remove this name from our lexical scope, and warn on it if we haven't
// already.
IdResolver.RemoveDecl(D);
auto ShadowI = ShadowingDecls.find(D);
if (ShadowI != ShadowingDecls.end()) {
if (const auto *FD = dyn_cast<FieldDecl>(ShadowI->second)) {
Diag(D->getLocation(), diag::warn_ctor_parm_shadows_field)
<< D << FD << FD->getParent();
Diag(FD->getLocation(), diag::note_previous_declaration);
}
ShadowingDecls.erase(ShadowI);
}
}
}
/// Look for an Objective-C class in the translation unit.
///
/// \param Id The name of the Objective-C class we're looking for. If
/// typo-correction fixes this name, the Id will be updated
/// to the fixed name.
///
/// \param IdLoc The location of the name in the translation unit.
///
/// \param DoTypoCorrection If true, this routine will attempt typo correction
/// if there is no class with the given name.
///
/// \returns The declaration of the named Objective-C class, or NULL if the
/// class could not be found.
ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool DoTypoCorrection) {
// The third "scope" argument is 0 since we aren't enabling lazy built-in
// creation from this context.
NamedDecl *IDecl = LookupSingleName(TUScope, Id, IdLoc, LookupOrdinaryName);
if (!IDecl && DoTypoCorrection) {
// Perform typo correction at the given location, but only if we
// find an Objective-C class name.
if (TypoCorrection C = CorrectTypo(
DeclarationNameInfo(Id, IdLoc), LookupOrdinaryName, TUScope, nullptr,
llvm::make_unique<DeclFilterCCC<ObjCInterfaceDecl>>(),
CTK_ErrorRecovery)) {
diagnoseTypo(C, PDiag(diag::err_undef_interface_suggest) << Id);
IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
Id = IDecl->getIdentifier();
}
}
ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
// This routine must always return a class definition, if any.
if (Def && Def->getDefinition())
Def = Def->getDefinition();
return Def;
}
/// getNonFieldDeclScope - Retrieves the innermost scope, starting
/// from S, where a non-field would be declared. This routine copes
/// with the difference between C and C++ scoping rules in structs and
/// unions. For example, the following code is well-formed in C but
/// ill-formed in C++:
/// @code
/// struct S6 {
/// enum { BAR } e;
/// };
///
/// void test_S6() {
/// struct S6 a;
/// a.e = BAR;
/// }
/// @endcode
/// For the declaration of BAR, this routine will return a different
/// scope. The scope S will be the scope of the unnamed enumeration
/// within S6. In C++, this routine will return the scope associated
/// with S6, because the enumeration's scope is a transparent
/// context but structures can contain non-field names. In C, this
/// routine will return the translation unit scope, since the
/// enumeration's scope is a transparent context and structures cannot
/// contain non-field names.
Scope *Sema::getNonFieldDeclScope(Scope *S) {
while (((S->getFlags() & Scope::DeclScope) == 0) ||
(S->getEntity() && S->getEntity()->isTransparentContext()) ||
(S->isClassScope() && !getLangOpts().CPlusPlus))
S = S->getParent();
return S;
}
/// Looks up the declaration of "struct objc_super" and
/// saves it for later use in building the builtin declarations of
/// objc_msgSendSuper and objc_msgSendSuper_stret. If no such
/// pre-existing declaration exists, no action takes place.
static void LookupPredefedObjCSuperType(Sema &ThisSema, Scope *S,
IdentifierInfo *II) {
if (!II->isStr("objc_msgSendSuper"))
return;
ASTContext &Context = ThisSema.Context;
LookupResult Result(ThisSema, &Context.Idents.get("objc_super"),
SourceLocation(), Sema::LookupTagName);
ThisSema.LookupName(Result, S);
if (Result.getResultKind() == LookupResult::Found)
if (const TagDecl *TD = Result.getAsSingle<TagDecl>())
Context.setObjCSuperType(Context.getTagDeclType(TD));
}
static StringRef getHeaderName(ASTContext::GetBuiltinTypeError Error) {
switch (Error) {
case ASTContext::GE_None:
return "";
case ASTContext::GE_Missing_stdio:
return "stdio.h";
case ASTContext::GE_Missing_setjmp:
return "setjmp.h";
case ASTContext::GE_Missing_ucontext:
return "ucontext.h";
}
llvm_unreachable("unhandled error kind");
}
/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
/// file scope. Lazily create a decl for it. ForRedeclaration is true
/// if we're creating this built-in in anticipation of redeclaring the
/// built-in.
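///
/// For example (a sketch): calling 'printf' in C with no declaration in scope
/// creates an implicit builtin declaration of 'int printf(const char *, ...)'
/// at translation-unit scope and notes that <stdio.h> should be included.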
NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc) {
LookupPredefedObjCSuperType(*this, S, II);
ASTContext::GetBuiltinTypeError Error;
QualType R = Context.GetBuiltinType(ID, Error);
if (Error) {
if (ForRedeclaration)
Diag(Loc, diag::warn_implicit_decl_requires_sysheader)
<< getHeaderName(Error) << Context.BuiltinInfo.getName(ID);
return nullptr;
}
if (!ForRedeclaration &&
(Context.BuiltinInfo.isPredefinedLibFunction(ID) ||
Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
if (Context.BuiltinInfo.getHeaderName(ID) &&
!Diags.isIgnored(diag::ext_implicit_lib_function_decl, Loc))
Diag(Loc, diag::note_include_header_or_declare)
<< Context.BuiltinInfo.getHeaderName(ID)
<< Context.BuiltinInfo.getName(ID);
}
if (R.isNull())
return nullptr;
DeclContext *Parent = Context.getTranslationUnitDecl();
if (getLangOpts().CPlusPlus) {
LinkageSpecDecl *CLinkageDecl =
LinkageSpecDecl::Create(Context, Parent, Loc, Loc,
LinkageSpecDecl::lang_c, false);
CLinkageDecl->setImplicit();
Parent->addDecl(CLinkageDecl);
Parent = CLinkageDecl;
}
FunctionDecl *New = FunctionDecl::Create(Context,
Parent,
Loc, Loc, II, R, /*TInfo=*/nullptr,
SC_Extern,
false,
R->isFunctionProtoType());
New->setImplicit();
// Create Decl objects for each parameter, adding them to the
// FunctionDecl.
if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
SmallVector<ParmVarDecl*, 16> Params;
for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
ParmVarDecl *parm =
ParmVarDecl::Create(Context, New, SourceLocation(), SourceLocation(),
nullptr, FT->getParamType(i), /*TInfo=*/nullptr,
SC_None, nullptr);
parm->setScopeInfo(0, i);
Params.push_back(parm);
}
New->setParams(Params);
}
AddKnownFunctionAttributes(New);
RegisterLocallyScopedExternCDecl(New, S);
// TUScope is the translation-unit scope to insert this function into.
// FIXME: This is hideous. We need to teach PushOnScopeChains to
// relate Scopes to DeclContexts, and probably eliminate CurContext
// entirely, but we're not there yet.
DeclContext *SavedContext = CurContext;
CurContext = Parent;
PushOnScopeChains(New, TUScope);
CurContext = SavedContext;
return New;
}
/// Typedef declarations don't have linkage, but they still denote the same
/// entity if their types are the same.
/// FIXME: This is notionally doing the same thing as ASTReaderDecl's
/// isSameEntity.
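///
/// For example (a sketch, with modules enabled): a hidden 'typedef long T;'
/// from a non-imported module and a local 'typedef int T;' declare distinct
/// entities, so the hidden, non-matching previous declaration is filtered
/// out below rather than reported as a conflicting redeclaration.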
static void filterNonConflictingPreviousTypedefDecls(Sema &S,
TypedefNameDecl *Decl,
LookupResult &Previous) {
// This is only interesting when modules are enabled.
if (!S.getLangOpts().Modules && !S.getLangOpts().ModulesLocalVisibility)
return;
// Empty sets are uninteresting.
if (Previous.empty())
return;
LookupResult::Filter Filter = Previous.makeFilter();
while (Filter.hasNext()) {
NamedDecl *Old = Filter.next();
// Non-hidden declarations are never ignored.
if (S.isVisible(Old))
continue;
// Declarations of the same entity are not ignored, even if they have
// different linkages.
if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) {
if (S.Context.hasSameType(OldTD->getUnderlyingType(),
Decl->getUnderlyingType()))
continue;
// If both declarations give a tag declaration a typedef name for linkage
// purposes, then they declare the same entity.
if (OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true) &&
Decl->getAnonDeclWithTypedefName())
continue;
}
Filter.erase();
}
Filter.done();
}
bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
QualType OldType;
if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
OldType = OldTypedef->getUnderlyingType();
else
OldType = Context.getTypeDeclType(Old);
QualType NewType = New->getUnderlyingType();
if (NewType->isVariablyModifiedType()) {
// Must not redefine a typedef with a variably-modified type.
int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
Diag(New->getLocation(), diag::err_redefinition_variably_modified_typedef)
<< Kind << NewType;
if (Old->getLocation().isValid())
notePreviousDefinition(Old, New->getLocation());
New->setInvalidDecl();
return true;
}
if (OldType != NewType &&
!OldType->isDependentType() &&
!NewType->isDependentType() &&
!Context.hasSameType(OldType, NewType)) {
int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
Diag(New->getLocation(), diag::err_redefinition_different_typedef)
<< Kind << NewType << OldType;
if (Old->getLocation().isValid())
notePreviousDefinition(Old, New->getLocation());
New->setInvalidDecl();
return true;
}
return false;
}
/// MergeTypedefNameDecl - We just parsed a typedef 'New' which has the
/// same name and scope as a previous declaration 'Old'. Figure out
/// how to resolve this situation, merging decls or emitting
/// diagnostics as appropriate. If there was an error, set New to be invalid.
///
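/// For example (a sketch):
/// @code
/// typedef int Length;
/// typedef int Length;  // OK in C++ and C11; diagnosed in C89/C99
/// typedef long Length; // error: typedef redefinition with different types
/// @endcode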
void Sema::MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls) {
// If the new decl is known invalid already, don't bother doing any
// merging checks.
if (New->isInvalidDecl()) return;
// Allow multiple definitions for ObjC built-in typedefs.
// FIXME: Verify the underlying types are equivalent!
if (getLangOpts().ObjC1) {
const IdentifierInfo *TypeID = New->getIdentifier();
switch (TypeID->getLength()) {
default: break;
case 2:
{
if (!TypeID->isStr("id"))
break;
QualType T = New->getUnderlyingType();
if (!T->isPointerType())
break;
if (!T->isVoidPointerType()) {
QualType PT = T->getAs<PointerType>()->getPointeeType();
if (!PT->isStructureType())
break;
}
Context.setObjCIdRedefinitionType(T);
// Install the built-in type for 'id', ignoring the current definition.
New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
return;
}
case 5:
if (!TypeID->isStr("Class"))
break;
Context.setObjCClassRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'Class', ignoring the current definition.
New->setTypeForDecl(Context.getObjCClassType().getTypePtr());
return;
case 3:
if (!TypeID->isStr("SEL"))
break;
Context.setObjCSelRedefinitionType(New->getUnderlyingType());
// Install the built-in type for 'SEL', ignoring the current definition.
New->setTypeForDecl(Context.getObjCSelType().getTypePtr());
return;
}
// Fall through - the typedef name was not a builtin type.
}
// Verify the old decl was also a type.
TypeDecl *Old = OldDecls.getAsSingle<TypeDecl>();
if (!Old) {
Diag(New->getLocation(), diag::err_redefinition_different_kind)
<< New->getDeclName();
NamedDecl *OldD = OldDecls.getRepresentativeDecl();
if (OldD->getLocation().isValid())
notePreviousDefinition(OldD, New->getLocation());
return New->setInvalidDecl();
}
// If the old declaration is invalid, just give up here.
if (Old->isInvalidDecl())
return New->setInvalidDecl();
if (auto *OldTD = dyn_cast<TypedefNameDecl>(Old)) {
auto *OldTag = OldTD->getAnonDeclWithTypedefName(/*AnyRedecl*/true);
auto *NewTag = New->getAnonDeclWithTypedefName();
NamedDecl *Hidden = nullptr;
if (OldTag && NewTag &&
OldTag->getCanonicalDecl() != NewTag->getCanonicalDecl() &&
!hasVisibleDefinition(OldTag, &Hidden)) {
// There is a definition of this tag, but it is not visible. Use it
// instead of our tag.
New->setTypeForDecl(OldTD->getTypeForDecl());
if (OldTD->isModed())
New->setModedTypeSourceInfo(OldTD->getTypeSourceInfo(),
OldTD->getUnderlyingType());
else
New->setTypeSourceInfo(OldTD->getTypeSourceInfo());
// Make the old tag definition visible.
makeMergedDefinitionVisible(Hidden);
// If this was an unscoped enumeration, yank all of its enumerators
// out of the scope.
if (isa<EnumDecl>(NewTag)) {
Scope *EnumScope = getNonFieldDeclScope(S);
for (auto *D : NewTag->decls()) {
auto *ED = cast<EnumConstantDecl>(D);
assert(EnumScope->isDeclScope(ED));
EnumScope->RemoveDecl(ED);
IdResolver.RemoveDecl(ED);
ED->getLexicalDeclContext()->removeDecl(ED);
}
}
}
}
// If the typedef types are not identical, reject them in all languages and
// with any extensions enabled.
if (isIncompatibleTypedef(Old, New))
return;
// The types match. Link up the redeclaration chain and merge attributes if
// the old declaration was a typedef.
if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Old)) {
New->setPreviousDecl(Typedef);
mergeDeclAttributes(New, Old);
}
if (getLangOpts().MicrosoftExt)
return;
if (getLangOpts().CPlusPlus) {
// C++ [dcl.typedef]p2:
// In a given non-class scope, a typedef specifier can be used to
// redefine the name of any type declared in that scope to refer
// to the type to which it already refers.
if (!isa<CXXRecordDecl>(CurContext))
return;
// C++0x [dcl.typedef]p4:
// In a given class scope, a typedef specifier can be used to redefine
// any class-name declared in that scope that is not also a typedef-name
// to refer to the type to which it already refers.
//
// This wording came in via DR424, which was a correction to the
// wording in DR56, which accidentally banned code like:
//
// struct S {
// typedef struct A { } A;
// };
//
// in the C++03 standard. We implement the C++0x semantics, which
// allow the above but disallow
//
// struct S {
// typedef int I;
// typedef int I;
// };
//
// since that was the intent of DR56.
if (!isa<TypedefNameDecl>(Old))
return;
Diag(New->getLocation(), diag::err_redefinition)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
return New->setInvalidDecl();
}
// Modules always permit redefinition of typedefs, as does C11.
if (getLangOpts().Modules || getLangOpts().C11)
return;
// If we have a redefinition of a typedef in C, emit a warning. This warning
// is normally mapped to an error, but can be controlled with
// -Wtypedef-redefinition. If either the original or the redefinition is
// in a system header, don't emit this for compatibility with GCC.
if (getDiagnostics().getSuppressSystemWarnings() &&
// Some standard types are defined implicitly in Clang (e.g. OpenCL).
(Old->isImplicit() ||
Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
Context.getSourceManager().isInSystemHeader(New->getLocation())))
return;
Diag(New->getLocation(), diag::ext_redefinition_of_typedef)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
}
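// Illustrative example of the C typedef-redefinition rule implemented above
// (identifiers are made up):
//
//   typedef int T;
//   typedef int T;  // diagnosed before C11 (-Wtypedef-redefinition);
//                   // valid in C11 and under modules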
/// DeclHasAttr - returns true if the decl \p D already has the target
/// attribute \p A.
static bool DeclHasAttr(const Decl *D, const Attr *A) {
const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
for (const auto *i : D->attrs())
if (i->getKind() == A->getKind()) {
if (Ann) {
if (Ann->getAnnotation() == cast<AnnotateAttr>(i)->getAnnotation())
return true;
continue;
}
// FIXME: Don't hardcode this check
if (OA && isa<OwnershipAttr>(i))
return OA->getOwnKind() == cast<OwnershipAttr>(i)->getOwnKind();
return true;
}
return false;
}
static bool isAttributeTargetADefinition(Decl *D) {
if (VarDecl *VD = dyn_cast<VarDecl>(D))
return VD->isThisDeclarationADefinition();
if (TagDecl *TD = dyn_cast<TagDecl>(D))
return TD->isCompleteDefinition() || TD->isBeingDefined();
return true;
}
/// Merge alignment attributes from \p Old to \p New, taking into account the
/// special semantics of C11's _Alignas specifier and C++11's alignas attribute.
///
/// \return \c true if any attributes were added to \p New.
static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
// Look for alignas attributes on Old, and pick out whichever attribute
// specifies the strictest alignment requirement.
AlignedAttr *OldAlignasAttr = nullptr;
AlignedAttr *OldStrictestAlignAttr = nullptr;
unsigned OldAlign = 0;
for (auto *I : Old->specific_attrs<AlignedAttr>()) {
// FIXME: We have no way of representing inherited dependent alignments
// in a case like:
// template<int A, int B> struct alignas(A) X;
// template<int A, int B> struct alignas(B) X {};
// For now, we just ignore any alignas attributes which are not on the
// definition in such a case.
if (I->isAlignmentDependent())
return false;
if (I->isAlignas())
OldAlignasAttr = I;
unsigned Align = I->getAlignment(S.Context);
if (Align > OldAlign) {
OldAlign = Align;
OldStrictestAlignAttr = I;
}
}
// Look for alignas attributes on New.
AlignedAttr *NewAlignasAttr = nullptr;
unsigned NewAlign = 0;
for (auto *I : New->specific_attrs<AlignedAttr>()) {
if (I->isAlignmentDependent())
return false;
if (I->isAlignas())
NewAlignasAttr = I;
unsigned Align = I->getAlignment(S.Context);
if (Align > NewAlign)
NewAlign = Align;
}
if (OldAlignasAttr && NewAlignasAttr && OldAlign != NewAlign) {
// Both declarations have 'alignas' attributes. We require them to match.
// C++11 [dcl.align]p6 and C11 6.7.5/7 both come close to saying this, but
// fall short. (If two declarations both have alignas, they must both match
// every definition, and so must match each other if there is a definition.)
// If either declaration only contains 'alignas(0)' specifiers, then it
// specifies the natural alignment for the type.
if (OldAlign == 0 || NewAlign == 0) {
QualType Ty;
if (ValueDecl *VD = dyn_cast<ValueDecl>(New))
Ty = VD->getType();
else
Ty = S.Context.getTagDeclType(cast<TagDecl>(New));
if (OldAlign == 0)
OldAlign = S.Context.getTypeAlign(Ty);
if (NewAlign == 0)
NewAlign = S.Context.getTypeAlign(Ty);
}
if (OldAlign != NewAlign) {
S.Diag(NewAlignasAttr->getLocation(), diag::err_alignas_mismatch)
<< (unsigned)S.Context.toCharUnitsFromBits(OldAlign).getQuantity()
<< (unsigned)S.Context.toCharUnitsFromBits(NewAlign).getQuantity();
S.Diag(OldAlignasAttr->getLocation(), diag::note_previous_declaration);
}
}
if (OldAlignasAttr && !NewAlignasAttr && isAttributeTargetADefinition(New)) {
// C++11 [dcl.align]p6:
// if any declaration of an entity has an alignment-specifier,
// every defining declaration of that entity shall specify an
// equivalent alignment.
// C11 6.7.5/7:
// If the definition of an object does not have an alignment
// specifier, any other declaration of that object shall also
// have no alignment specifier.
S.Diag(New->getLocation(), diag::err_alignas_missing_on_definition)
<< OldAlignasAttr;
S.Diag(OldAlignasAttr->getLocation(), diag::note_alignas_on_declaration)
<< OldAlignasAttr;
}
bool AnyAdded = false;
// Ensure we have an attribute representing the strictest alignment.
if (OldAlign > NewAlign) {
AlignedAttr *Clone = OldStrictestAlignAttr->clone(S.Context);
Clone->setInherited(true);
New->addAttr(Clone);
AnyAdded = true;
}
// Ensure we have an alignas attribute if the old declaration had one.
if (OldAlignasAttr && !NewAlignasAttr &&
!(AnyAdded && OldStrictestAlignAttr->isAlignas())) {
AlignedAttr *Clone = OldAlignasAttr->clone(S.Context);
Clone->setInherited(true);
New->addAttr(Clone);
AnyAdded = true;
}
return AnyAdded;
}
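// Illustrative cases for the alignment-merging rules above (declarations are
// made up):
//
//   _Alignas(8) extern int x;
//   _Alignas(16) extern int x;  // error: mismatched alignment requirements
//
//   _Alignas(16) extern int y;
//   int y;                      // error: definition must repeat _Alignas(16)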
static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
const InheritableAttr *Attr,
Sema::AvailabilityMergeKind AMK) {
// This function copies an attribute Attr from a previous declaration to the
// new declaration D if the new declaration doesn't itself have that attribute
// yet or if that attribute allows duplicates.
// If you're adding a new attribute that requires logic different from
// "use explicit attribute on decl if present, else use attribute from
// previous decl", for example if the attribute needs to be consistent
// between redeclarations, you need to call a custom merge function here.
InheritableAttr *NewAttr = nullptr;
unsigned AttrSpellingListIndex = Attr->getSpellingListIndex();
if (const auto *AA = dyn_cast<AvailabilityAttr>(Attr))
NewAttr = S.mergeAvailabilityAttr(D, AA->getRange(), AA->getPlatform(),
AA->isImplicit(), AA->getIntroduced(),
AA->getDeprecated(),
AA->getObsoleted(), AA->getUnavailable(),
AA->getMessage(), AA->getStrict(),
AA->getReplacement(), AMK,
AttrSpellingListIndex);
else if (const auto *VA = dyn_cast<VisibilityAttr>(Attr))
NewAttr = S.mergeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
AttrSpellingListIndex);
else if (const auto *VA = dyn_cast<TypeVisibilityAttr>(Attr))
NewAttr = S.mergeTypeVisibilityAttr(D, VA->getRange(), VA->getVisibility(),
AttrSpellingListIndex);
else if (const auto *ImportA = dyn_cast<DLLImportAttr>(Attr))
NewAttr = S.mergeDLLImportAttr(D, ImportA->getRange(),
AttrSpellingListIndex);
else if (const auto *ExportA = dyn_cast<DLLExportAttr>(Attr))
NewAttr = S.mergeDLLExportAttr(D, ExportA->getRange(),
AttrSpellingListIndex);
else if (const auto *FA = dyn_cast<FormatAttr>(Attr))
NewAttr = S.mergeFormatAttr(D, FA->getRange(), FA->getType(),
FA->getFormatIdx(), FA->getFirstArg(),
AttrSpellingListIndex);
else if (const auto *SA = dyn_cast<SectionAttr>(Attr))
NewAttr = S.mergeSectionAttr(D, SA->getRange(), SA->getName(),
AttrSpellingListIndex);
else if (const auto *CSA = dyn_cast<CodeSegAttr>(Attr))
NewAttr = S.mergeCodeSegAttr(D, CSA->getRange(), CSA->getName(),
AttrSpellingListIndex);
else if (const auto *IA = dyn_cast<MSInheritanceAttr>(Attr))
NewAttr = S.mergeMSInheritanceAttr(D, IA->getRange(), IA->getBestCase(),
AttrSpellingListIndex,
IA->getSemanticSpelling());
else if (const auto *AA = dyn_cast<AlwaysInlineAttr>(Attr))
NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(),
&S.Context.Idents.get(AA->getSpelling()),
AttrSpellingListIndex);
else if (S.getLangOpts().CUDA && isa<FunctionDecl>(D) &&
(isa<CUDAHostAttr>(Attr) || isa<CUDADeviceAttr>(Attr) ||
isa<CUDAGlobalAttr>(Attr))) {
// CUDA target attributes are part of function signature for
// overloading purposes and must not be merged.
return false;
} else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);
else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
NewAttr = S.mergeInternalLinkageAttr(
D, InternalLinkageA->getRange(),
&S.Context.Idents.get(InternalLinkageA->getSpelling()),
AttrSpellingListIndex);
else if (const auto *CommonA = dyn_cast<CommonAttr>(Attr))
NewAttr = S.mergeCommonAttr(D, CommonA->getRange(),
&S.Context.Idents.get(CommonA->getSpelling()),
AttrSpellingListIndex);
else if (isa<AlignedAttr>(Attr))
// AlignedAttrs are handled separately, because we need to handle all
// such attributes on a declaration at the same time.
NewAttr = nullptr;
else if ((isa<DeprecatedAttr>(Attr) || isa<UnavailableAttr>(Attr)) &&
(AMK == Sema::AMK_Override ||
AMK == Sema::AMK_ProtocolImplementation))
NewAttr = nullptr;
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
NewAttr = S.mergeUuidAttr(D, UA->getRange(), AttrSpellingListIndex,
UA->getGuid());
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
if (NewAttr) {
NewAttr->setInherited(true);
D->addAttr(NewAttr);
if (isa<MSInheritanceAttr>(NewAttr))
S.Consumer.AssignInheritanceModel(cast<CXXRecordDecl>(D));
return true;
}
return false;
}
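// Sketch of the default inheritance path above (attribute and names chosen
// for illustration):
//
//   __attribute__((deprecated)) void f(void);
//   void f(void);  // inherits the deprecated attribute from the prior decl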
static const NamedDecl *getDefinition(const Decl *D) {
if (const TagDecl *TD = dyn_cast<TagDecl>(D))
return TD->getDefinition();
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
const VarDecl *Def = VD->getDefinition();
if (Def)
return Def;
return VD->getActingDefinition();
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
return FD->getDefinition();
return nullptr;
}
static bool hasAttribute(const Decl *D, attr::Kind Kind) {
for (const auto *Attribute : D->attrs())
if (Attribute->getKind() == Kind)
return true;
return false;
}
/// checkNewAttributesAfterDef - If we already have a definition, check that
/// there are no new attributes in this declaration.
static void checkNewAttributesAfterDef(Sema &S, Decl *New, const Decl *Old) {
if (!New->hasAttrs())
return;
const NamedDecl *Def = getDefinition(Old);
if (!Def || Def == New)
return;
AttrVec &NewAttributes = New->getAttrs();
for (unsigned I = 0, E = NewAttributes.size(); I != E;) {
const Attr *NewAttribute = NewAttributes[I];
if (isa<AliasAttr>(NewAttribute) || isa<IFuncAttr>(NewAttribute)) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(New)) {
Sema::SkipBodyInfo SkipBody;
S.CheckForFunctionRedefinition(FD, cast<FunctionDecl>(Def), &SkipBody);
// If we're skipping this definition, drop the "alias" attribute.
if (SkipBody.ShouldSkip) {
NewAttributes.erase(NewAttributes.begin() + I);
--E;
continue;
}
} else {
VarDecl *VD = cast<VarDecl>(New);
unsigned Diag = cast<VarDecl>(Def)->isThisDeclarationADefinition() ==
VarDecl::TentativeDefinition
? diag::err_alias_after_tentative
: diag::err_redefinition;
S.Diag(VD->getLocation(), Diag) << VD->getDeclName();
if (Diag == diag::err_redefinition)
S.notePreviousDefinition(Def, VD->getLocation());
else
S.Diag(Def->getLocation(), diag::note_previous_definition);
VD->setInvalidDecl();
}
++I;
continue;
}
if (const VarDecl *VD = dyn_cast<VarDecl>(Def)) {
// Tentative definitions are only interesting for the alias check above.
if (VD->isThisDeclarationADefinition() != VarDecl::Definition) {
++I;
continue;
}
}
if (hasAttribute(Def, NewAttribute->getKind())) {
++I;
continue; // regular attr merging will take care of validating this.
}
if (isa<C11NoReturnAttr>(NewAttribute)) {
// C's _Noreturn is allowed to be added to a function after it is defined.
++I;
continue;
} else if (const AlignedAttr *AA = dyn_cast<AlignedAttr>(NewAttribute)) {
if (AA->isAlignas()) {
// C++11 [dcl.align]p6:
// if any declaration of an entity has an alignment-specifier,
// every defining declaration of that entity shall specify an
// equivalent alignment.
// C11 6.7.5/7:
// If the definition of an object does not have an alignment
// specifier, any other declaration of that object shall also
// have no alignment specifier.
S.Diag(Def->getLocation(), diag::err_alignas_missing_on_definition)
<< AA;
S.Diag(NewAttribute->getLocation(), diag::note_alignas_on_declaration)
<< AA;
NewAttributes.erase(NewAttributes.begin() + I);
--E;
continue;
}
}
S.Diag(NewAttribute->getLocation(),
diag::warn_attribute_precede_definition);
S.Diag(Def->getLocation(), diag::note_previous_definition);
NewAttributes.erase(NewAttributes.begin() + I);
--E;
}
}
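// Example of the situation diagnosed above (names are illustrative):
//
//   void f(void) {}
//   __attribute__((noinline)) void f(void);  // warning: attribute declaration
//                                            // must precede definition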
/// mergeDeclAttributes - Copy attributes from the Old decl to the New one.
void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK) {
if (UsedAttr *OldAttr = Old->getMostRecentDecl()->getAttr<UsedAttr>()) {
UsedAttr *NewAttr = OldAttr->clone(Context);
NewAttr->setInherited(true);
New->addAttr(NewAttr);
}
if (!Old->hasAttrs() && !New->hasAttrs())
return;
// Attributes declared post-definition are currently ignored.
checkNewAttributesAfterDef(*this, New, Old);
if (AsmLabelAttr *NewA = New->getAttr<AsmLabelAttr>()) {
if (AsmLabelAttr *OldA = Old->getAttr<AsmLabelAttr>()) {
if (OldA->getLabel() != NewA->getLabel()) {
// This redeclaration changes __asm__ label.
Diag(New->getLocation(), diag::err_different_asm_label);
Diag(OldA->getLocation(), diag::note_previous_declaration);
}
} else if (Old->isUsed()) {
// This redeclaration adds an __asm__ label to a declaration that has
// already been ODR-used.
Diag(New->getLocation(), diag::err_late_asm_label_name)
<< isa<FunctionDecl>(Old) << New->getAttr<AsmLabelAttr>()->getRange();
}
}
// A redeclaration cannot add abi_tag attributes.
if (const auto *NewAbiTagAttr = New->getAttr<AbiTagAttr>()) {
if (const auto *OldAbiTagAttr = Old->getAttr<AbiTagAttr>()) {
for (const auto &NewTag : NewAbiTagAttr->tags()) {
if (std::find(OldAbiTagAttr->tags_begin(), OldAbiTagAttr->tags_end(),
NewTag) == OldAbiTagAttr->tags_end()) {
Diag(NewAbiTagAttr->getLocation(),
diag::err_new_abi_tag_on_redeclaration)
<< NewTag;
Diag(OldAbiTagAttr->getLocation(), diag::note_previous_declaration);
}
}
} else {
Diag(NewAbiTagAttr->getLocation(), diag::err_abi_tag_on_redeclaration);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
}
// This redeclaration adds a section attribute.
if (New->hasAttr<SectionAttr>() && !Old->hasAttr<SectionAttr>()) {
if (auto *VD = dyn_cast<VarDecl>(New)) {
if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly) {
Diag(New->getLocation(), diag::warn_attribute_section_on_redeclaration);
Diag(Old->getLocation(), diag::note_previous_declaration);
}
}
}
// This redeclaration adds a code_seg attribute.
const auto *NewCSA = New->getAttr<CodeSegAttr>();
if (NewCSA && !Old->hasAttr<CodeSegAttr>() &&
!NewCSA->isImplicit() && isa<CXXMethodDecl>(New)) {
Diag(New->getLocation(), diag::warn_mismatched_section)
<< 0 /*codeseg*/;
Diag(Old->getLocation(), diag::note_previous_declaration);
}
if (!Old->hasAttrs())
return;
bool foundAny = New->hasAttrs();
// Ensure that any moving of objects within the allocated map is done before
// we process them.
if (!foundAny) New->setAttrs(AttrVec());
for (auto *I : Old->specific_attrs<InheritableAttr>()) {
// Ignore deprecated/unavailable/availability attributes if requested.
AvailabilityMergeKind LocalAMK = AMK_None;
if (isa<DeprecatedAttr>(I) ||
isa<UnavailableAttr>(I) ||
isa<AvailabilityAttr>(I)) {
switch (AMK) {
case AMK_None:
continue;
case AMK_Redeclaration:
case AMK_Override:
case AMK_ProtocolImplementation:
LocalAMK = AMK;
break;
}
}
// Already handled.
if (isa<UsedAttr>(I))
continue;
if (mergeDeclAttribute(*this, New, I, LocalAMK))
foundAny = true;
}
if (mergeAlignedAttrs(*this, New, Old))
foundAny = true;
if (!foundAny) New->dropAttrs();
}
/// mergeParamDeclAttributes - Copy attributes from the old parameter
/// to the new one.
static void mergeParamDeclAttributes(ParmVarDecl *newDecl,
const ParmVarDecl *oldDecl,
Sema &S) {
// C++11 [dcl.attr.depend]p2:
// The first declaration of a function shall specify the
// carries_dependency attribute for its declarator-id if any declaration
// of the function specifies the carries_dependency attribute.
const CarriesDependencyAttr *CDA = newDecl->getAttr<CarriesDependencyAttr>();
if (CDA && !oldDecl->hasAttr<CarriesDependencyAttr>()) {
S.Diag(CDA->getLocation(),
diag::err_carries_dependency_missing_on_first_decl) << 1/*Param*/;
// Find the first declaration of the parameter.
// FIXME: Should we build redeclaration chains for function parameters?
const FunctionDecl *FirstFD =
cast<FunctionDecl>(oldDecl->getDeclContext())->getFirstDecl();
const ParmVarDecl *FirstVD =
FirstFD->getParamDecl(oldDecl->getFunctionScopeIndex());
S.Diag(FirstVD->getLocation(),
diag::note_carries_dependency_missing_first_decl) << 1/*Param*/;
}
if (!oldDecl->hasAttrs())
return;
bool foundAny = newDecl->hasAttrs();
// Ensure that any moving of objects within the allocated map is
// done before we process them.
if (!foundAny) newDecl->setAttrs(AttrVec());
for (const auto *I : oldDecl->specific_attrs<InheritableParamAttr>()) {
if (!DeclHasAttr(newDecl, I)) {
InheritableAttr *newAttr =
cast<InheritableParamAttr>(I->clone(S.Context));
newAttr->setInherited(true);
newDecl->addAttr(newAttr);
foundAny = true;
}
}
if (!foundAny) newDecl->dropAttrs();
}
static void mergeParamDeclTypes(ParmVarDecl *NewParam,
const ParmVarDecl *OldParam,
Sema &S) {
if (auto Oldnullability = OldParam->getType()->getNullability(S.Context)) {
if (auto Newnullability = NewParam->getType()->getNullability(S.Context)) {
if (*Oldnullability != *Newnullability) {
S.Diag(NewParam->getLocation(), diag::warn_mismatched_nullability_attr)
<< DiagNullabilityKind(
*Newnullability,
((NewParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
!= 0))
<< DiagNullabilityKind(
*Oldnullability,
((OldParam->getObjCDeclQualifier() & Decl::OBJC_TQ_CSNullability)
!= 0));
S.Diag(OldParam->getLocation(), diag::note_previous_declaration);
}
} else {
QualType NewT = NewParam->getType();
NewT = S.Context.getAttributedType(
AttributedType::getNullabilityAttrKind(*Oldnullability),
NewT, NewT);
NewParam->setType(NewT);
}
}
}
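// Example of the nullability merging above (illustrative prototypes):
//
//   void f(int * _Nonnull p);
//   void f(int * _Nullable p);  // warning: conflicting nullability specifiers
//
//   void g(int * _Nonnull p);
//   void g(int *p);             // parameter silently inherits _Nonnull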
namespace {
/// Used in MergeFunctionDecl to record K&R function parameters in C whose
/// promoted types are not compatible with a previous prototype.
struct GNUCompatibleParamWarning {
ParmVarDecl *OldParm;
ParmVarDecl *NewParm;
QualType PromotedType;
};
} // end anonymous namespace
/// getSpecialMember - get the special member enum for a method.
Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) {
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
if (Ctor->isDefaultConstructor())
return Sema::CXXDefaultConstructor;
if (Ctor->isCopyConstructor())
return Sema::CXXCopyConstructor;
if (Ctor->isMoveConstructor())
return Sema::CXXMoveConstructor;
} else if (isa<CXXDestructorDecl>(MD)) {
return Sema::CXXDestructor;
} else if (MD->isCopyAssignmentOperator()) {
return Sema::CXXCopyAssignment;
} else if (MD->isMoveAssignmentOperator()) {
return Sema::CXXMoveAssignment;
}
return Sema::CXXInvalid;
}
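// For reference, the mapping performed above (illustrative class):
//
//   struct S {
//     S();                      // CXXDefaultConstructor
//     S(const S &);             // CXXCopyConstructor
//     S(S &&);                  // CXXMoveConstructor
//     ~S();                     // CXXDestructor
//     S &operator=(const S &);  // CXXCopyAssignment
//     S &operator=(S &&);       // CXXMoveAssignment
//   };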
// Determine whether the previous declaration was a definition, an implicit
// declaration, or a plain declaration, and select the matching note.
template <typename T>
static std::pair<diag::kind, SourceLocation>
getNoteDiagForInvalidRedeclaration(const T *Old, const T *New) {
diag::kind PrevDiag;
SourceLocation OldLocation = Old->getLocation();
if (Old->isThisDeclarationADefinition())
PrevDiag = diag::note_previous_definition;
else if (Old->isImplicit()) {
PrevDiag = diag::note_previous_implicit_declaration;
if (OldLocation.isInvalid())
OldLocation = New->getLocation();
} else
PrevDiag = diag::note_previous_declaration;
return std::make_pair(PrevDiag, OldLocation);
}
/// canRedefineFunction - checks if a function can be redefined. Currently,
/// only extern inline functions can be redefined, and even then only in
/// GNU89 mode.
static bool canRedefineFunction(const FunctionDecl *FD,
const LangOptions& LangOpts) {
return ((FD->hasAttr<GNUInlineAttr>() || LangOpts.GNUInline) &&
!LangOpts.CPlusPlus &&
FD->isInlineSpecified() &&
FD->getStorageClass() == SC_Extern);
}
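// Example of the one permitted redefinition (gnu89 semantics, names are
// illustrative):
//
//   extern inline int f(void) { return 0; }  // inline definition
//   int f(void) { return 1; }                // OK in gnu89: external definition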
const AttributedType *Sema::getCallingConvAttributedType(QualType T) const {
const AttributedType *AT = T->getAs<AttributedType>();
while (AT && !AT->isCallingConv())
AT = AT->getModifiedType()->getAs<AttributedType>();
return AT;
}
template <typename T>
static bool haveIncompatibleLanguageLinkages(const T *Old, const T *New) {
const DeclContext *DC = Old->getDeclContext();
if (DC->isRecord())
return false;
LanguageLinkage OldLinkage = Old->getLanguageLinkage();
if (OldLinkage == CXXLanguageLinkage && New->isInExternCContext())
return true;
if (OldLinkage == CLanguageLinkage && New->isInExternCXXContext())
return true;
return false;
}
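// Example of incompatible language linkage (illustrative):
//
//   int f();             // C++ language linkage
//   extern "C" int f();  // error: different language linkage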
template<typename T> static bool isExternC(T *D) { return D->isExternC(); }
static bool isExternC(VarTemplateDecl *) { return false; }
/// Check whether a redeclaration of an entity introduced by a
/// using-declaration is valid, given that we know it's not an overload
/// (nor a hidden tag declaration).
template<typename ExpectedDecl>
static bool checkUsingShadowRedecl(Sema &S, UsingShadowDecl *OldS,
ExpectedDecl *New) {
// C++11 [basic.scope.declarative]p4:
// Given a set of declarations in a single declarative region, each of
// which specifies the same unqualified name,
// -- they shall all refer to the same entity, or all refer to functions
// and function templates; or
// -- exactly one declaration shall declare a class name or enumeration
// name that is not a typedef name and the other declarations shall all
// refer to the same variable or enumerator, or all refer to functions
// and function templates; in this case the class name or enumeration
// name is hidden (3.3.10).
// C++11 [namespace.udecl]p14:
// If a function declaration in namespace scope or block scope has the
// same name and the same parameter-type-list as a function introduced
// by a using-declaration, and the declarations do not declare the same
// function, the program is ill-formed.
auto *Old = dyn_cast<ExpectedDecl>(OldS->getTargetDecl());
if (Old &&
!Old->getDeclContext()->getRedeclContext()->Equals(
New->getDeclContext()->getRedeclContext()) &&
!(isExternC(Old) && isExternC(New)))
Old = nullptr;
if (!Old) {
S.Diag(New->getLocation(), diag::err_using_decl_conflict_reverse);
S.Diag(OldS->getTargetDecl()->getLocation(), diag::note_using_decl_target);
S.Diag(OldS->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
return true;
}
return false;
}
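// Illustrative ill-formed case handled above (namespace and names made up):
//
//   namespace N { void f(int); }
//   using N::f;
//   void f(int);  // error: conflicts with the target of the using declaration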
static bool hasIdenticalPassObjectSizeAttrs(const FunctionDecl *A,
const FunctionDecl *B) {
assert(A->getNumParams() == B->getNumParams());
auto AttrEq = [](const ParmVarDecl *A, const ParmVarDecl *B) {
const auto *AttrA = A->getAttr<PassObjectSizeAttr>();
const auto *AttrB = B->getAttr<PassObjectSizeAttr>();
if (AttrA == AttrB)
return true;
return AttrA && AttrB && AttrA->getType() == AttrB->getType();
};
return std::equal(A->param_begin(), A->param_end(), B->param_begin(), AttrEq);
}
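// Example of a mismatch this helper detects (illustrative prototypes):
//
//   void f(void *p __attribute__((pass_object_size(0))));
//   void f(void *p);  // error: pass_object_size attributes must match exactly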
/// If necessary, adjust the semantic declaration context for a qualified
/// declaration to name the correct inline namespace within the qualifier.
static void adjustDeclContextForDeclaratorDecl(DeclaratorDecl *NewD,
DeclaratorDecl *OldD) {
// The only case where we need to update the DeclContext is when
// redeclaration lookup for a qualified name finds a declaration
// in an inline namespace within the context named by the qualifier:
//
// inline namespace N { int f(); }
// int ::f(); // Sema DC needs adjusting from :: to N::.
//
// For unqualified declarations, the semantic context *can* change
// along the redeclaration chain (for local extern declarations,
// extern "C" declarations, and friend declarations in particular).
if (!NewD->getQualifier())
return;
// NewD is probably already in the right context.
auto *NamedDC = NewD->getDeclContext()->getRedeclContext();
auto *SemaDC = OldD->getDeclContext()->getRedeclContext();
if (NamedDC->Equals(SemaDC))
return;
assert((NamedDC->InEnclosingNamespaceSetOf(SemaDC) ||
NewD->isInvalidDecl() || OldD->isInvalidDecl()) &&
"unexpected context for redeclaration");
auto *LexDC = NewD->getLexicalDeclContext();
auto FixSemaDC = [=](NamedDecl *D) {
if (!D)
return;
D->setDeclContext(SemaDC);
D->setLexicalDeclContext(LexDC);
};
FixSemaDC(NewD);
if (auto *FD = dyn_cast<FunctionDecl>(NewD))
FixSemaDC(FD->getDescribedFunctionTemplate());
else if (auto *VD = dyn_cast<VarDecl>(NewD))
FixSemaDC(VD->getDescribedVarTemplate());
}
/// MergeFunctionDecl - We just parsed a function 'New' from
/// declarator D which has the same name and scope as a previous
/// declaration 'Old'. Figure out how to resolve this situation,
/// merging decls or emitting diagnostics as appropriate.
///
/// In C++, New and Old must be declarations that are not
/// overloaded. Use IsOverload to determine whether New and Old are
/// overloaded, and to select the Old declaration that New should be
/// merged with.
///
/// Returns true if there was an error, false otherwise.
bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
Scope *S, bool MergeTypeWithOld) {
// Verify the old decl was also a function.
FunctionDecl *Old = OldD->getAsFunction();
if (!Old) {
if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(OldD)) {
if (New->getFriendObjectKind()) {
Diag(New->getLocation(), diag::err_using_decl_friend);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
Diag(Shadow->getUsingDecl()->getLocation(),
diag::note_using_decl) << 0;
return true;
}
// Check whether the two declarations might declare the same function.
if (checkUsingShadowRedecl<FunctionDecl>(*this, Shadow, New))
return true;
OldD = Old = cast<FunctionDecl>(Shadow->getTargetDecl());
} else {
Diag(New->getLocation(), diag::err_redefinition_different_kind)
<< New->getDeclName();
notePreviousDefinition(OldD, New->getLocation());
return true;
}
}
// If the old declaration is invalid, just give up here.
if (Old->isInvalidDecl())
return true;
// Disallow redeclaration of some builtins.
if (!getASTContext().canBuiltinBeRedeclared(Old)) {
Diag(New->getLocation(), diag::err_builtin_redeclare) << Old->getDeclName();
Diag(Old->getLocation(), diag::note_previous_builtin_declaration)
<< Old << Old->getType();
return true;
}
diag::kind PrevDiag;
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation) =
getNoteDiagForInvalidRedeclaration(Old, New);
// Don't complain about this if we're in GNU89 mode and the old function
// is an extern inline function.
// Don't complain about specializations. They are not supposed to have
// storage classes.
if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) &&
New->getStorageClass() == SC_Static &&
Old->hasExternalFormalLinkage() &&
!New->getTemplateSpecializationInfo() &&
!canRedefineFunction(Old, getLangOpts())) {
if (getLangOpts().MicrosoftExt) {
Diag(New->getLocation(), diag::ext_static_non_static) << New;
Diag(OldLocation, PrevDiag);
} else {
Diag(New->getLocation(), diag::err_static_non_static) << New;
Diag(OldLocation, PrevDiag);
return true;
}
}
if (New->hasAttr<InternalLinkageAttr>() &&
!Old->hasAttr<InternalLinkageAttr>()) {
Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
New->dropAttr<InternalLinkageAttr>();
}
if (CheckRedeclarationModuleOwnership(New, Old))
return true;
if (!getLangOpts().CPlusPlus) {
bool OldOvl = Old->hasAttr<OverloadableAttr>();
if (OldOvl != New->hasAttr<OverloadableAttr>() && !Old->isImplicit()) {
Diag(New->getLocation(), diag::err_attribute_overloadable_mismatch)
<< New << OldOvl;
// Try our best to find a decl that actually has the overloadable
// attribute for the note. In most cases (e.g. programs with only one
// broken declaration/definition), this won't matter.
//
// FIXME: We could do this if we juggled some extra state in
// OverloadableAttr, rather than just removing it.
const Decl *DiagOld = Old;
if (OldOvl) {
auto OldIter = llvm::find_if(Old->redecls(), [](const Decl *D) {
const auto *A = D->getAttr<OverloadableAttr>();
return A && !A->isImplicit();
});
// If we've implicitly added *all* of the overloadable attrs to this
// chain, emitting a "previous redecl" note is pointless.
DiagOld = OldIter == Old->redecls_end() ? nullptr : *OldIter;
}
if (DiagOld)
Diag(DiagOld->getLocation(),
diag::note_attribute_overloadable_prev_overload)
<< OldOvl;
if (OldOvl)
New->addAttr(OverloadableAttr::CreateImplicit(Context));
else
New->dropAttr<OverloadableAttr>();
}
}
// If a function is first declared with a calling convention, but is later
// declared or defined without one, all following decls assume the calling
// convention of the first.
//
// It's OK if a function is first declared without a calling convention,
// but is later declared or defined with the default calling convention.
//
// To test if either decl has an explicit calling convention, we look for
// AttributedType sugar nodes on the type as written. If they are missing or
// were canonicalized away, we assume the calling convention was implicit.
//
// Note also that we DO NOT return at this point, because we still have
// other tests to run.
QualType OldQType = Context.getCanonicalType(Old->getType());
QualType NewQType = Context.getCanonicalType(New->getType());
const FunctionType *OldType = cast<FunctionType>(OldQType);
const FunctionType *NewType = cast<FunctionType>(NewQType);
FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
bool RequiresAdjustment = false;
if (OldTypeInfo.getCC() != NewTypeInfo.getCC()) {
FunctionDecl *First = Old->getFirstDecl();
const FunctionType *FT =
First->getType().getCanonicalType()->castAs<FunctionType>();
FunctionType::ExtInfo FI = FT->getExtInfo();
bool NewCCExplicit = getCallingConvAttributedType(New->getType());
if (!NewCCExplicit) {
// Inherit the CC from the previous declaration if it was specified
// there but not here.
NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
RequiresAdjustment = true;
} else {
// Calling conventions aren't compatible, so complain.
bool FirstCCExplicit = getCallingConvAttributedType(First->getType());
Diag(New->getLocation(), diag::err_cconv_change)
<< FunctionType::getNameForCallConv(NewTypeInfo.getCC())
<< !FirstCCExplicit
<< (!FirstCCExplicit ? "" :
FunctionType::getNameForCallConv(FI.getCC()));
// Put the note on the first decl, since it is the one that matters.
Diag(First->getLocation(), diag::note_previous_declaration);
return true;
}
}
// FIXME: diagnose the other way around?
if (OldTypeInfo.getNoReturn() && !NewTypeInfo.getNoReturn()) {
NewTypeInfo = NewTypeInfo.withNoReturn(true);
RequiresAdjustment = true;
}
// Merge regparm attribute.
if (OldTypeInfo.getHasRegParm() != NewTypeInfo.getHasRegParm() ||
OldTypeInfo.getRegParm() != NewTypeInfo.getRegParm()) {
if (NewTypeInfo.getHasRegParm()) {
Diag(New->getLocation(), diag::err_regparm_mismatch)
<< NewType->getRegParmType()
<< OldType->getRegParmType();
Diag(OldLocation, diag::note_previous_declaration);
return true;
}
NewTypeInfo = NewTypeInfo.withRegParm(OldTypeInfo.getRegParm());
RequiresAdjustment = true;
}
// Merge ns_returns_retained attribute.
if (OldTypeInfo.getProducesResult() != NewTypeInfo.getProducesResult()) {
if (NewTypeInfo.getProducesResult()) {
Diag(New->getLocation(), diag::err_function_attribute_mismatch)
<< "'ns_returns_retained'";
Diag(OldLocation, diag::note_previous_declaration);
return true;
}
NewTypeInfo = NewTypeInfo.withProducesResult(true);
RequiresAdjustment = true;
}
if (OldTypeInfo.getNoCallerSavedRegs() !=
NewTypeInfo.getNoCallerSavedRegs()) {
if (NewTypeInfo.getNoCallerSavedRegs()) {
AnyX86NoCallerSavedRegistersAttr *Attr =
New->getAttr<AnyX86NoCallerSavedRegistersAttr>();
Diag(New->getLocation(), diag::err_function_attribute_mismatch) << Attr;
Diag(OldLocation, diag::note_previous_declaration);
return true;
}
NewTypeInfo = NewTypeInfo.withNoCallerSavedRegs(true);
RequiresAdjustment = true;
}
if (RequiresAdjustment) {
const FunctionType *AdjustedType = New->getType()->getAs<FunctionType>();
AdjustedType = Context.adjustFunctionType(AdjustedType, NewTypeInfo);
New->setType(QualType(AdjustedType, 0));
NewQType = Context.getCanonicalType(New->getType());
NewType = cast<FunctionType>(NewQType);
}
// If this redeclaration makes the function inline, we may need to add it to
// UndefinedButUsed.
if (!Old->isInlined() && New->isInlined() &&
!New->hasAttr<GNUInlineAttr>() &&
!getLangOpts().GNUInline &&
Old->isUsed(false) &&
!Old->isDefined() && !New->isThisDeclarationADefinition())
UndefinedButUsed.insert(std::make_pair(Old->getCanonicalDecl(),
SourceLocation()));
// If this redeclaration makes it newly gnu_inline, we don't want to warn
// about it.
if (New->hasAttr<GNUInlineAttr>() &&
Old->isInlined() && !Old->hasAttr<GNUInlineAttr>()) {
UndefinedButUsed.erase(Old->getCanonicalDecl());
}
// If pass_object_size params don't match up perfectly, this isn't a valid
// redeclaration.
if (Old->getNumParams() > 0 && Old->getNumParams() == New->getNumParams() &&
!hasIdenticalPassObjectSizeAttrs(Old, New)) {
Diag(New->getLocation(), diag::err_different_pass_object_size_params)
<< New->getDeclName();
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
if (getLangOpts().CPlusPlus) {
// C++1z [over.load]p2
// Certain function declarations cannot be overloaded:
// -- Function declarations that differ only in the return type,
// the exception specification, or both cannot be overloaded.
// Check the exception specifications match. This may recompute the type of
// both Old and New if it resolved exception specifications, so grab the
// types again after this. Because this updates the type, we do this before
// any of the other checks below, which may update the "de facto" NewQType
// but do not necessarily update the type of New.
if (CheckEquivalentExceptionSpec(Old, New))
return true;
OldQType = Context.getCanonicalType(Old->getType());
NewQType = Context.getCanonicalType(New->getType());
// Go back to the type source info to compare the declared return types,
// per C++1y [dcl.type.auto]p13:
// Redeclarations or specializations of a function or function template
// with a declared return type that uses a placeholder type shall also
// use that placeholder, not a deduced type.
- QualType OldDeclaredReturnType =
- (Old->getTypeSourceInfo()
- ? Old->getTypeSourceInfo()->getType()->castAs<FunctionType>()
- : OldType)->getReturnType();
- QualType NewDeclaredReturnType =
- (New->getTypeSourceInfo()
- ? New->getTypeSourceInfo()->getType()->castAs<FunctionType>()
- : NewType)->getReturnType();
+ QualType OldDeclaredReturnType = Old->getDeclaredReturnType();
+ QualType NewDeclaredReturnType = New->getDeclaredReturnType();
if (!Context.hasSameType(OldDeclaredReturnType, NewDeclaredReturnType) &&
- !((NewQType->isDependentType() || OldQType->isDependentType()) &&
- New->isLocalExternDecl())) {
+ canFullyTypeCheckRedeclaration(New, Old, NewDeclaredReturnType,
+ OldDeclaredReturnType)) {
QualType ResQT;
if (NewDeclaredReturnType->isObjCObjectPointerType() &&
OldDeclaredReturnType->isObjCObjectPointerType())
+ // FIXME: This does the wrong thing for a deduced return type.
ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType);
if (ResQT.isNull()) {
if (New->isCXXClassMember() && New->isOutOfLine())
Diag(New->getLocation(), diag::err_member_def_does_not_match_ret_type)
<< New << New->getReturnTypeSourceRange();
else
Diag(New->getLocation(), diag::err_ovl_diff_return_type)
<< New->getReturnTypeSourceRange();
Diag(OldLocation, PrevDiag) << Old << Old->getType()
<< Old->getReturnTypeSourceRange();
return true;
}
else
NewQType = ResQT;
}
QualType OldReturnType = OldType->getReturnType();
QualType NewReturnType = cast<FunctionType>(NewQType)->getReturnType();
if (OldReturnType != NewReturnType) {
// If this function has a deduced return type and has already been
// defined, copy the deduced value from the old declaration.
AutoType *OldAT = Old->getReturnType()->getContainedAutoType();
if (OldAT && OldAT->isDeduced()) {
New->setType(
SubstAutoType(New->getType(),
OldAT->isDependentType() ? Context.DependentTy
: OldAT->getDeducedType()));
NewQType = Context.getCanonicalType(
SubstAutoType(NewQType,
OldAT->isDependentType() ? Context.DependentTy
: OldAT->getDeducedType()));
}
}
const CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old);
CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New);
if (OldMethod && NewMethod) {
// Preserve triviality.
NewMethod->setTrivial(OldMethod->isTrivial());
// MSVC allows explicit template specialization at class scope:
// 2 CXXMethodDecls referring to the same function will be injected.
// We don't want a redeclaration error.
bool IsClassScopeExplicitSpecialization =
OldMethod->isFunctionTemplateSpecialization() &&
NewMethod->isFunctionTemplateSpecialization();
bool isFriend = NewMethod->getFriendObjectKind();
if (!isFriend && NewMethod->getLexicalDeclContext()->isRecord() &&
!IsClassScopeExplicitSpecialization) {
// -- Member function declarations with the same name and the
// same parameter types cannot be overloaded if any of them
// is a static member function declaration.
if (OldMethod->isStatic() != NewMethod->isStatic()) {
Diag(New->getLocation(), diag::err_ovl_static_nonstatic_member);
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
// C++ [class.mem]p1:
// [...] A member shall not be declared twice in the
// member-specification, except that a nested class or member
// class template can be declared and then later defined.
if (!inTemplateInstantiation()) {
unsigned NewDiag;
if (isa<CXXConstructorDecl>(OldMethod))
NewDiag = diag::err_constructor_redeclared;
else if (isa<CXXDestructorDecl>(NewMethod))
NewDiag = diag::err_destructor_redeclared;
else if (isa<CXXConversionDecl>(NewMethod))
NewDiag = diag::err_conv_function_redeclared;
else
NewDiag = diag::err_member_redeclared;
Diag(New->getLocation(), NewDiag);
} else {
Diag(New->getLocation(), diag::err_member_redeclared_in_instantiation)
<< New << New->getType();
}
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
// Complain if this is an explicit declaration of a special
// member that was initially declared implicitly.
//
// As an exception, it's okay to befriend such methods in order
// to permit the implicit constructor/destructor/operator calls.
} else if (OldMethod->isImplicit()) {
if (isFriend) {
NewMethod->setImplicit();
} else {
Diag(NewMethod->getLocation(),
diag::err_definition_of_implicitly_declared_member)
<< New << getSpecialMember(OldMethod);
return true;
}
} else if (OldMethod->getFirstDecl()->isExplicitlyDefaulted() && !isFriend) {
Diag(NewMethod->getLocation(),
diag::err_definition_of_explicitly_defaulted_member)
<< getSpecialMember(OldMethod);
return true;
}
}
// C++11 [dcl.attr.noreturn]p1:
// The first declaration of a function shall specify the noreturn
// attribute if any declaration of that function specifies the noreturn
// attribute.
const CXX11NoReturnAttr *NRA = New->getAttr<CXX11NoReturnAttr>();
if (NRA && !Old->hasAttr<CXX11NoReturnAttr>()) {
Diag(NRA->getLocation(), diag::err_noreturn_missing_on_first_decl);
Diag(Old->getFirstDecl()->getLocation(),
diag::note_noreturn_missing_first_decl);
}
// C++11 [dcl.attr.depend]p2:
// The first declaration of a function shall specify the
// carries_dependency attribute for its declarator-id if any declaration
// of the function specifies the carries_dependency attribute.
const CarriesDependencyAttr *CDA = New->getAttr<CarriesDependencyAttr>();
if (CDA && !Old->hasAttr<CarriesDependencyAttr>()) {
Diag(CDA->getLocation(),
diag::err_carries_dependency_missing_on_first_decl) << 0/*Function*/;
Diag(Old->getFirstDecl()->getLocation(),
diag::note_carries_dependency_missing_first_decl) << 0/*Function*/;
}
// (C++98 8.3.5p3):
// All declarations for a function shall agree exactly in both the
// return type and the parameter-type-list.
// We also want to respect all the extended bits except noreturn.
// noreturn should now match unless the old type info didn't have it.
QualType OldQTypeForComparison = OldQType;
if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) {
auto *OldType = OldQType->castAs<FunctionProtoType>();
const FunctionType *OldTypeForComparison
= Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true));
OldQTypeForComparison = QualType(OldTypeForComparison, 0);
assert(OldQTypeForComparison.isCanonical());
}
if (haveIncompatibleLanguageLinkages(Old, New)) {
// As a special case, retain the language linkage from previous
// declarations of a friend function as an extension.
//
// This liberal interpretation of C++ [class.friend]p3 matches GCC/MSVC
// and is useful because there's otherwise no way to specify language
// linkage within class scope.
//
// Check cautiously as the friend object kind isn't yet complete.
if (New->getFriendObjectKind() != Decl::FOK_None) {
Diag(New->getLocation(), diag::ext_retained_language_linkage) << New;
Diag(OldLocation, PrevDiag);
} else {
Diag(New->getLocation(), diag::err_different_language_linkage) << New;
Diag(OldLocation, PrevDiag);
return true;
}
}
if (OldQTypeForComparison == NewQType)
return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
- if ((NewQType->isDependentType() || OldQType->isDependentType()) &&
- New->isLocalExternDecl()) {
- // It's OK if we couldn't merge types for a local function declaraton
- // if either the old or new type is dependent. We'll merge the types
- // when we instantiate the function.
+ // If the types are imprecise (due to dependent constructs in friends or
+ // local extern declarations), it's OK if they differ. We'll check again
+ // during instantiation.
+ if (!canFullyTypeCheckRedeclaration(New, Old, NewQType, OldQType))
return false;
- }
// Fall through for conflicting redeclarations and redefinitions.
}
// C: Function types need to be compatible, not identical. This handles
// duplicate function decls like "void f(int); void f(enum X);" properly.
if (!getLangOpts().CPlusPlus &&
Context.typesAreCompatible(OldQType, NewQType)) {
const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
const FunctionProtoType *OldProto = nullptr;
if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
(OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
// The old declaration provided a function prototype, but the
// new declaration does not. Merge in the prototype.
assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
SmallVector<QualType, 16> ParamTypes(OldProto->param_types());
NewQType =
Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes,
OldProto->getExtProtoInfo());
New->setType(NewQType);
New->setHasInheritedPrototype();
// Synthesize parameters with the same types.
SmallVector<ParmVarDecl*, 16> Params;
for (const auto &ParamType : OldProto->param_types()) {
ParmVarDecl *Param = ParmVarDecl::Create(Context, New, SourceLocation(),
SourceLocation(), nullptr,
ParamType, /*TInfo=*/nullptr,
SC_None, nullptr);
Param->setScopeInfo(0, Params.size());
Param->setImplicit();
Params.push_back(Param);
}
New->setParams(Params);
}
return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
}
// GNU C permits a K&R definition to follow a prototype declaration
// if the declared types of the parameters in the K&R definition
// match the types in the prototype declaration, even when the
// promoted types of the parameters from the K&R definition differ
// from the types in the prototype. GCC then keeps the types from
// the prototype.
//
// If a variadic prototype is followed by a non-variadic K&R definition,
// the K&R definition becomes variadic. This is sort of an edge case, but
// it's legal per the standard depending on how you read C99 6.7.5.3p15 and
// C99 6.9.1p8.
if (!getLangOpts().CPlusPlus &&
Old->hasPrototype() && !New->hasPrototype() &&
New->getType()->getAs<FunctionProtoType>() &&
Old->getNumParams() == New->getNumParams()) {
SmallVector<QualType, 16> ArgTypes;
SmallVector<GNUCompatibleParamWarning, 16> Warnings;
const FunctionProtoType *OldProto
= Old->getType()->getAs<FunctionProtoType>();
const FunctionProtoType *NewProto
= New->getType()->getAs<FunctionProtoType>();
// Determine whether this is the GNU C extension.
QualType MergedReturn = Context.mergeTypes(OldProto->getReturnType(),
NewProto->getReturnType());
bool LooseCompatible = !MergedReturn.isNull();
for (unsigned Idx = 0, End = Old->getNumParams();
LooseCompatible && Idx != End; ++Idx) {
ParmVarDecl *OldParm = Old->getParamDecl(Idx);
ParmVarDecl *NewParm = New->getParamDecl(Idx);
if (Context.typesAreCompatible(OldParm->getType(),
NewProto->getParamType(Idx))) {
ArgTypes.push_back(NewParm->getType());
} else if (Context.typesAreCompatible(OldParm->getType(),
NewParm->getType(),
/*CompareUnqualified=*/true)) {
GNUCompatibleParamWarning Warn = { OldParm, NewParm,
NewProto->getParamType(Idx) };
Warnings.push_back(Warn);
ArgTypes.push_back(NewParm->getType());
} else
LooseCompatible = false;
}
if (LooseCompatible) {
for (unsigned Warn = 0; Warn < Warnings.size(); ++Warn) {
Diag(Warnings[Warn].NewParm->getLocation(),
diag::ext_param_promoted_not_compatible_with_prototype)
<< Warnings[Warn].PromotedType
<< Warnings[Warn].OldParm->getType();
if (Warnings[Warn].OldParm->getLocation().isValid())
Diag(Warnings[Warn].OldParm->getLocation(),
diag::note_previous_declaration);
}
if (MergeTypeWithOld)
New->setType(Context.getFunctionType(MergedReturn, ArgTypes,
OldProto->getExtProtoInfo()));
return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
}
// Fall through to diagnose conflicting types.
}
// A function that has already been declared has been redeclared or
// defined with a different type; show an appropriate diagnostic.
// If the previous declaration was an implicitly-generated builtin
// declaration, then at the very least we should use a specialized note.
unsigned BuiltinID;
if (Old->isImplicit() && (BuiltinID = Old->getBuiltinID())) {
// If it's actually a library-defined builtin function like 'malloc'
// or 'printf', just warn about the incompatible redeclaration.
if (Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New;
Diag(OldLocation, diag::note_previous_builtin_declaration)
<< Old << Old->getType();
// If this is a global redeclaration, just forget hereafter
// about the "builtin-ness" of the function.
//
// Doing this for local extern declarations is problematic. If
// the builtin declaration remains visible, a second invalid
// local declaration will produce a hard error; if it doesn't
// remain visible, a single bogus local redeclaration (which is
// actually only a warning) could break all the downstream code.
if (!New->getLexicalDeclContext()->isFunctionOrMethod())
New->getIdentifier()->revertBuiltin();
return false;
}
PrevDiag = diag::note_previous_builtin_declaration;
}
Diag(New->getLocation(), diag::err_conflicting_types) << New->getDeclName();
Diag(OldLocation, PrevDiag) << Old << Old->getType();
return true;
}
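// Example of the conflicting-types diagnostic that ends the function above
// (C code, illustrative):
//
//   int f(int);
//   int f(long);  // error: conflicting types for 'f' (C has no overloading)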
/// Completes the merge of two function declarations that are
/// known to be compatible.
///
/// This routine handles the merging of attributes and other
/// properties of function declarations from the old declaration to
/// the new declaration, once we know that New is in fact a
/// redeclaration of Old.
///
/// \returns false
bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld) {
// Merge the attributes
mergeDeclAttributes(New, Old);
// Merge "pure" flag.
if (Old->isPure())
New->setPure();
// Merge "used" flag.
if (Old->getMostRecentDecl()->isUsed(false))
New->setIsUsed();
// Merge attributes from the parameters. These can mismatch with K&R
// declarations.
if (New->getNumParams() == Old->getNumParams())
for (unsigned i = 0, e = New->getNumParams(); i != e; ++i) {
ParmVarDecl *NewParam = New->getParamDecl(i);
ParmVarDecl *OldParam = Old->getParamDecl(i);
mergeParamDeclAttributes(NewParam, OldParam, *this);
mergeParamDeclTypes(NewParam, OldParam, *this);
}
if (getLangOpts().CPlusPlus)
return MergeCXXFunctionDecl(New, Old, S);
// Merge the function types so that we get the composite types for the return
// and argument types. Per C11 6.2.7/4, only update the type if the old decl
// was visible.
QualType Merged = Context.mergeTypes(Old->getType(), New->getType());
if (!Merged.isNull() && MergeTypeWithOld)
New->setType(Merged);
return false;
}
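// Example of C composite-type merging performed above (illustrative, cf.
// C11 6.2.7p3):
//
//   void f(int (*)[]);   // prior visible declaration
//   void f(int (*)[4]);  // merged composite parameter type: int (*)[4]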
void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
ObjCMethodDecl *oldMethod) {
// Merge the attributes, including deprecated/unavailable
AvailabilityMergeKind MergeKind =
isa<ObjCProtocolDecl>(oldMethod->getDeclContext())
? AMK_ProtocolImplementation
: isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
: AMK_Override;
mergeDeclAttributes(newMethod, oldMethod, MergeKind);
// Merge attributes from the parameters.
ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin(),
oe = oldMethod->param_end();
for (ObjCMethodDecl::param_iterator
ni = newMethod->param_begin(), ne = newMethod->param_end();
ni != ne && oi != oe; ++ni, ++oi)
mergeParamDeclAttributes(*ni, *oi, *this);
CheckObjCMethodOverride(newMethod, oldMethod);
}
static void diagnoseVarDeclTypeMismatch(Sema &S, VarDecl *New, VarDecl* Old) {
assert(!S.Context.hasSameType(New->getType(), Old->getType()));
S.Diag(New->getLocation(), New->isThisDeclarationADefinition()
? diag::err_redefinition_different_type
: diag::err_redeclaration_different_type)
<< New->getDeclName() << New->getType() << Old->getType();
diag::kind PrevDiag;
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation)
= getNoteDiagForInvalidRedeclaration(Old, New);
S.Diag(OldLocation, PrevDiag);
New->setInvalidDecl();
}
/// MergeVarDeclTypes - We parsed a variable 'New' which has the same name and
/// scope as a previous declaration 'Old'. Figure out how to merge their types,
/// emitting diagnostics as appropriate.
///
/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
/// to here in AddInitializerToDecl. We can't check them before the initializer
/// is attached.
void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
bool MergeTypeWithOld) {
if (New->isInvalidDecl() || Old->isInvalidDecl())
return;
QualType MergedT;
if (getLangOpts().CPlusPlus) {
if (New->getType()->isUndeducedType()) {
// We don't know what the new type is until the initializer is attached.
return;
} else if (Context.hasSameType(New->getType(), Old->getType())) {
// These could still be something that needs exception specs checked.
return MergeVarDeclExceptionSpecs(New, Old);
}
// C++ [basic.link]p10:
// [...] the types specified by all declarations referring to a given
// object or function shall be identical, except that declarations for an
// array object can specify array types that differ by the presence or
// absence of a major array bound (8.3.4).
else if (Old->getType()->isArrayType() && New->getType()->isArrayType()) {
const ArrayType *OldArray = Context.getAsArrayType(Old->getType());
const ArrayType *NewArray = Context.getAsArrayType(New->getType());
// We are merging a variable declaration New into Old. If it has an array
// bound, and that bound differs from Old's bound, we should diagnose the
// mismatch.
if (!NewArray->isIncompleteArrayType() && !NewArray->isDependentType()) {
for (VarDecl *PrevVD = Old->getMostRecentDecl(); PrevVD;
PrevVD = PrevVD->getPreviousDecl()) {
const ArrayType *PrevVDTy = Context.getAsArrayType(PrevVD->getType());
if (PrevVDTy->isIncompleteArrayType() || PrevVDTy->isDependentType())
continue;
if (!Context.hasSameType(NewArray, PrevVDTy))
return diagnoseVarDeclTypeMismatch(*this, New, PrevVD);
}
}
if (OldArray->isIncompleteArrayType() && NewArray->isArrayType()) {
if (Context.hasSameType(OldArray->getElementType(),
NewArray->getElementType()))
MergedT = New->getType();
}
// FIXME: Check visibility. New is hidden but has a complete type. If New
// has no array bound, it should not inherit one from Old, if Old is not
// visible.
else if (OldArray->isArrayType() && NewArray->isIncompleteArrayType()) {
if (Context.hasSameType(OldArray->getElementType(),
NewArray->getElementType()))
MergedT = Old->getType();
}
}
else if (New->getType()->isObjCObjectPointerType() &&
Old->getType()->isObjCObjectPointerType()) {
MergedT = Context.mergeObjCGCQualifiers(New->getType(),
Old->getType());
}
} else {
// C 6.2.7p2:
// All declarations that refer to the same object or function shall have
// compatible type.
MergedT = Context.mergeTypes(New->getType(), Old->getType());
}
if (MergedT.isNull()) {
// It's OK if we couldn't merge types if either type is dependent, for a
// block-scope variable. In other cases (static data members of class
// templates, variable templates, ...), we require the types to be
// equivalent.
// FIXME: The C++ standard doesn't say anything about this.
if ((New->getType()->isDependentType() ||
Old->getType()->isDependentType()) && New->isLocalVarDecl()) {
// If the old type was dependent, we can't merge with it, so the new type
// becomes dependent for now. We'll reproduce the original type when we
// instantiate the TypeSourceInfo for the variable.
if (!New->getType()->isDependentType() && MergeTypeWithOld)
New->setType(Context.DependentTy);
return;
}
return diagnoseVarDeclTypeMismatch(*this, New, Old);
}
// Don't actually update the type on the new declaration if the old
// declaration was an extern declaration in a different scope.
if (MergeTypeWithOld)
New->setType(MergedT);
}
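// Example of the array-bound merging above (illustrative):
//
//   extern int a[];  // incomplete array type
//   int a[10];       // OK; the merged type is int[10]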
static bool mergeTypeWithPrevious(Sema &S, VarDecl *NewVD, VarDecl *OldVD,
LookupResult &Previous) {
// C11 6.2.7p4:
// For an identifier with internal or external linkage declared
// in a scope in which a prior declaration of that identifier is
// visible, if the prior declaration specifies internal or
// external linkage, the type of the identifier at the later
// declaration becomes the composite type.
//
// If the variable isn't visible, we do not merge with its type.
if (Previous.isShadowed())
return false;
if (S.getLangOpts().CPlusPlus) {
// C++11 [dcl.array]p3:
// If there is a preceding declaration of the entity in the same
// scope in which the bound was specified, an omitted array bound
// is taken to be the same as in that earlier declaration.
return NewVD->isPreviousDeclInSameBlockScope() ||
(!OldVD->getLexicalDeclContext()->isFunctionOrMethod() &&
!NewVD->getLexicalDeclContext()->isFunctionOrMethod());
} else {
// If the old declaration was function-local, don't merge with its
// type unless we're in the same function.
return !OldVD->getLexicalDeclContext()->isFunctionOrMethod() ||
OldVD->getLexicalDeclContext() == NewVD->getLexicalDeclContext();
}
}
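// Example of the C++ same-scope rule cited above (illustrative):
//
//   void g() {
//     extern int a[10];
//     extern int a[];  // bound is taken from the earlier block-scope decl
//   }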
/// MergeVarDecl - We just parsed a variable 'New' which has the same name
/// and scope as a previous declaration 'Old'. Figure out how to resolve this
/// situation, merging decls or emitting diagnostics as appropriate.
///
/// Tentative definition rules (C99 6.9.2p2) are checked by
/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
/// definitions here, since the initializer hasn't been attached.
///
void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
// If the new decl is already invalid, don't do any other checking.
if (New->isInvalidDecl())
return;
if (!shouldLinkPossiblyHiddenDecl(Previous, New))
return;
VarTemplateDecl *NewTemplate = New->getDescribedVarTemplate();
// Verify the old decl was also a variable or variable template.
VarDecl *Old = nullptr;
VarTemplateDecl *OldTemplate = nullptr;
if (Previous.isSingleResult()) {
if (NewTemplate) {
OldTemplate = dyn_cast<VarTemplateDecl>(Previous.getFoundDecl());
Old = OldTemplate ? OldTemplate->getTemplatedDecl() : nullptr;
if (auto *Shadow =
dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
if (checkUsingShadowRedecl<VarTemplateDecl>(*this, Shadow, NewTemplate))
return New->setInvalidDecl();
} else {
Old = dyn_cast<VarDecl>(Previous.getFoundDecl());
if (auto *Shadow =
dyn_cast<UsingShadowDecl>(Previous.getRepresentativeDecl()))
if (checkUsingShadowRedecl<VarDecl>(*this, Shadow, New))
return New->setInvalidDecl();
}
}
if (!Old) {
Diag(New->getLocation(), diag::err_redefinition_different_kind)
<< New->getDeclName();
notePreviousDefinition(Previous.getRepresentativeDecl(),
New->getLocation());
return New->setInvalidDecl();
}
// Ensure the template parameters are compatible.
if (NewTemplate &&
!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
OldTemplate->getTemplateParameters(),
/*Complain=*/true, TPL_TemplateMatch))
return New->setInvalidDecl();
// C++ [class.mem]p1:
// A member shall not be declared twice in the member-specification [...]
//
// Here, we need only consider static data members.
if (Old->isStaticDataMember() && !New->isOutOfLine()) {
Diag(New->getLocation(), diag::err_duplicate_member)
<< New->getIdentifier();
Diag(Old->getLocation(), diag::note_previous_declaration);
New->setInvalidDecl();
}
mergeDeclAttributes(New, Old);
// Warn if an already-declared variable is made a weak_import in a subsequent
// declaration
if (New->hasAttr<WeakImportAttr>() &&
Old->getStorageClass() == SC_None &&
!Old->hasAttr<WeakImportAttr>()) {
Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
// Remove weak_import attribute on new declaration.
New->dropAttr<WeakImportAttr>();
}
if (New->hasAttr<InternalLinkageAttr>() &&
!Old->hasAttr<InternalLinkageAttr>()) {
Diag(New->getLocation(), diag::err_internal_linkage_redeclaration)
<< New->getDeclName();
notePreviousDefinition(Old, New->getLocation());
New->dropAttr<InternalLinkageAttr>();
}
// Merge the types.
VarDecl *MostRecent = Old->getMostRecentDecl();
if (MostRecent != Old) {
MergeVarDeclTypes(New, MostRecent,
mergeTypeWithPrevious(*this, New, MostRecent, Previous));
if (New->isInvalidDecl())
return;
}
MergeVarDeclTypes(New, Old, mergeTypeWithPrevious(*this, New, Old, Previous));
if (New->isInvalidDecl())
return;
diag::kind PrevDiag;
SourceLocation OldLocation;
std::tie(PrevDiag, OldLocation) =
getNoteDiagForInvalidRedeclaration(Old, New);
// [dcl.stc]p8: Check if we have a non-static decl followed by a static.
if (New->getStorageClass() == SC_Static &&
!New->isStaticDataMember() &&
Old->hasExternalFormalLinkage()) {
if (getLangOpts().MicrosoftExt) {
Diag(New->getLocation(), diag::ext_static_non_static)
<< New->getDeclName();
Diag(OldLocation, PrevDiag);
} else {
Diag(New->getLocation(), diag::err_static_non_static)
<< New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
}
// C99 6.2.2p4:
// For an identifier declared with the storage-class specifier
// extern in a scope in which a prior declaration of that
// identifier is visible,23) if the prior declaration specifies
// internal or external linkage, the linkage of the identifier at
// the later declaration is the same as the linkage specified at
// the prior declaration. If no prior declaration is visible, or
// if the prior declaration specifies no linkage, then the
// identifier has external linkage.
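// An illustrative sketch (not part of the original source):
//   static int n;  // internal linkage
//   extern int n;  // OK: takes the linkage of the prior declaration (internal)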
if (New->hasExternalStorage() && Old->hasLinkage())
/* Okay */;
else if (New->getCanonicalDecl()->getStorageClass() != SC_Static &&
!New->isStaticDataMember() &&
Old->getCanonicalDecl()->getStorageClass() == SC_Static) {
Diag(New->getLocation(), diag::err_non_static_static) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
// Check if extern is followed by non-extern and vice-versa.
if (New->hasExternalStorage() &&
!Old->hasLinkage() && Old->isLocalVarDeclOrParm()) {
Diag(New->getLocation(), diag::err_extern_non_extern) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
if (Old->hasLinkage() && New->isLocalVarDeclOrParm() &&
!New->hasExternalStorage()) {
Diag(New->getLocation(), diag::err_non_extern_extern) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
if (CheckRedeclarationModuleOwnership(New, Old))
return;
// Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
// FIXME: The test for external storage here seems wrong? We still
// need to check for mismatches.
if (!New->hasExternalStorage() && !New->isFileVarDecl() &&
// Don't complain about out-of-line definitions of static members.
!(Old->getLexicalDeclContext()->isRecord() &&
!New->getLexicalDeclContext()->isRecord())) {
Diag(New->getLocation(), diag::err_redefinition) << New->getDeclName();
Diag(OldLocation, PrevDiag);
return New->setInvalidDecl();
}
if (New->isInline() && !Old->getMostRecentDecl()->isInline()) {
if (VarDecl *Def = Old->getDefinition()) {
// C++1z [dcl.fcn.spec]p4:
// If the definition of a variable appears in a translation unit before
// its first declaration as inline, the program is ill-formed.
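// A hypothetical example (added for exposition; assumes C++17 inline
// variables):
//   int v;                // definition
//   extern inline int v;  // ill-formed: first declared inline after the def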
Diag(New->getLocation(), diag::err_inline_decl_follows_def) << New;
Diag(Def->getLocation(), diag::note_previous_definition);
}
}
// If this redeclaration makes the variable inline, we may need to add it to
// UndefinedButUsed.
if (!Old->isInline() && New->isInline() && Old->isUsed(false) &&
!Old->getDefinition() && !New->isThisDeclarationADefinition())
UndefinedButUsed.insert(std::make_pair(Old->getCanonicalDecl(),
SourceLocation()));
if (New->getTLSKind() != Old->getTLSKind()) {
if (!Old->getTLSKind()) {
Diag(New->getLocation(), diag::err_thread_non_thread) << New->getDeclName();
Diag(OldLocation, PrevDiag);
} else if (!New->getTLSKind()) {
Diag(New->getLocation(), diag::err_non_thread_thread) << New->getDeclName();
Diag(OldLocation, PrevDiag);
} else {
// Do not allow redeclaration to change the variable between requiring
// static and dynamic initialization.
// FIXME: GCC allows this, but uses the TLS keyword on the first
// declaration to determine the kind. Do we need to be compatible here?
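// Illustrative sketch (not from the original source):
//   __thread int t;      // requires static initialization
//   thread_local int t;  // may require dynamic initialization: rejected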
Diag(New->getLocation(), diag::err_thread_thread_different_kind)
<< New->getDeclName() << (New->getTLSKind() == VarDecl::TLS_Dynamic);
Diag(OldLocation, PrevDiag);
}
}
// C++ doesn't have tentative definitions, so go right ahead and check here.
if (getLangOpts().CPlusPlus &&
New->isThisDeclarationADefinition() == VarDecl::Definition) {
if (Old->isStaticDataMember() && Old->getCanonicalDecl()->isInline() &&
Old->getCanonicalDecl()->isConstexpr()) {
// This definition won't be a definition any more once it's been merged.
Diag(New->getLocation(),
diag::warn_deprecated_redundant_constexpr_static_def);
} else if (VarDecl *Def = Old->getDefinition()) {
if (checkVarDeclRedefinition(Def, New))
return;
}
}
if (haveIncompatibleLanguageLinkages(Old, New)) {
Diag(New->getLocation(), diag::err_different_language_linkage) << New;
Diag(OldLocation, PrevDiag);
New->setInvalidDecl();
return;
}
// Merge "used" flag.
if (Old->getMostRecentDecl()->isUsed(false))
New->setIsUsed();
// Keep a chain of previous declarations.
New->setPreviousDecl(Old);
if (NewTemplate)
NewTemplate->setPreviousDecl(OldTemplate);
adjustDeclContextForDeclaratorDecl(New, Old);
// Inherit access appropriately.
New->setAccess(Old->getAccess());
if (NewTemplate)
NewTemplate->setAccess(New->getAccess());
if (Old->isInline())
New->setImplicitlyInline();
}
void Sema::notePreviousDefinition(const NamedDecl *Old, SourceLocation New) {
SourceManager &SrcMgr = getSourceManager();
auto FNewDecLoc = SrcMgr.getDecomposedLoc(New);
auto FOldDecLoc = SrcMgr.getDecomposedLoc(Old->getLocation());
auto *FNew = SrcMgr.getFileEntryForID(FNewDecLoc.first);
auto *FOld = SrcMgr.getFileEntryForID(FOldDecLoc.first);
auto &HSI = PP.getHeaderSearchInfo();
StringRef HdrFilename =
SrcMgr.getFilename(SrcMgr.getSpellingLoc(Old->getLocation()));
auto noteFromModuleOrInclude = [&](Module *Mod,
SourceLocation IncLoc) -> bool {
// Redefinition errors with modules are common with non-modular mapped
// headers, e.g. a non-modular header H in module A that also gets
// included directly in a TU. Pointing twice to the same header/definition
// is confusing; try to provide better diagnostics when modules are enabled.
if (IncLoc.isValid()) {
if (Mod) {
Diag(IncLoc, diag::note_redefinition_modules_same_file)
<< HdrFilename.str() << Mod->getFullModuleName();
if (!Mod->DefinitionLoc.isInvalid())
Diag(Mod->DefinitionLoc, diag::note_defined_here)
<< Mod->getFullModuleName();
} else {
Diag(IncLoc, diag::note_redefinition_include_same_file)
<< HdrFilename.str();
}
return true;
}
return false;
};
// Is it the same file and same offset? Provide more information on why
// this leads to a redefinition error.
bool EmittedDiag = false;
if (FNew == FOld && FNewDecLoc.second == FOldDecLoc.second) {
SourceLocation OldIncLoc = SrcMgr.getIncludeLoc(FOldDecLoc.first);
SourceLocation NewIncLoc = SrcMgr.getIncludeLoc(FNewDecLoc.first);
EmittedDiag = noteFromModuleOrInclude(Old->getOwningModule(), OldIncLoc);
EmittedDiag |= noteFromModuleOrInclude(getCurrentModule(), NewIncLoc);
// If the header has no guards, emit a note suggesting one.
if (FOld && !HSI.isFileMultipleIncludeGuarded(FOld))
Diag(Old->getLocation(), diag::note_use_ifdef_guards);
if (EmittedDiag)
return;
}
// The redefinition comes from a different file, or we couldn't do better above.
if (Old->getLocation().isValid())
Diag(Old->getLocation(), diag::note_previous_definition);
}
/// We've just determined that \p Old and \p New both appear to be definitions
/// of the same variable. Either diagnose or fix the problem.
bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
if (!hasVisibleDefinition(Old) &&
(New->getFormalLinkage() == InternalLinkage ||
New->isInline() ||
New->getDescribedVarTemplate() ||
New->getNumTemplateParameterLists() ||
New->getDeclContext()->isDependentContext())) {
// The previous definition is hidden, and multiple definitions are
// permitted (in separate TUs). Demote this to a declaration.
New->demoteThisDefinitionToDeclaration();
// Make the canonical definition visible.
if (auto *OldTD = Old->getDescribedVarTemplate())
makeMergedDefinitionVisible(OldTD);
makeMergedDefinitionVisible(Old);
return false;
} else {
Diag(New->getLocation(), diag::err_redefinition) << New;
notePreviousDefinition(Old, New->getLocation());
New->setInvalidDecl();
return true;
}
}
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed.
Decl *
Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord) {
return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg(), false,
AnonRecord);
}
// The MS ABI changed between VS2013 and VS2015 with regard to numbers used to
// disambiguate entities defined in different scopes.
// While the VS2015 ABI fixes potential miscompiles, it also breaks
// compatibility.
// We will pick our mangling number depending on which version of MSVC is being
// targeted.
static unsigned getMSManglingNumber(const LangOptions &LO, Scope *S) {
return LO.isCompatibleWithMSVC(LangOptions::MSVC2015)
? S->getMSCurManglingNumber()
: S->getMSLastManglingNumber();
}
void Sema::handleTagNumbering(const TagDecl *Tag, Scope *TagScope) {
if (!Context.getLangOpts().CPlusPlus)
return;
if (isa<CXXRecordDecl>(Tag->getParent())) {
// If this tag is the direct child of a class, number it if
// it is anonymous.
if (!Tag->getName().empty() || Tag->getTypedefNameForAnonDecl())
return;
MangleNumberingContext &MCtx =
Context.getManglingNumberContext(Tag->getParent());
Context.setManglingNumber(
Tag, MCtx.getManglingNumber(
Tag, getMSManglingNumber(getLangOpts(), TagScope)));
return;
}
// If this tag isn't a direct child of a class, number it if it is local.
Decl *ManglingContextDecl;
if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
Tag->getDeclContext(), ManglingContextDecl)) {
Context.setManglingNumber(
Tag, MCtx->getManglingNumber(
Tag, getMSManglingNumber(getLangOpts(), TagScope)));
}
}
void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD) {
if (TagFromDeclSpec->isInvalidDecl())
return;
// Do nothing if the tag already has a name for linkage purposes.
if (TagFromDeclSpec->hasNameForLinkage())
return;
// A well-formed anonymous tag must always be a TUK_Definition.
assert(TagFromDeclSpec->isThisDeclarationADefinition());
// The type must match the tag exactly; no qualifiers allowed.
if (!Context.hasSameType(NewTD->getUnderlyingType(),
Context.getTagDeclType(TagFromDeclSpec))) {
if (getLangOpts().CPlusPlus)
Context.addTypedefNameForUnnamedTagDecl(TagFromDeclSpec, NewTD);
return;
}
// If we've already computed linkage for the anonymous tag, then
// adding a typedef name for the anonymous decl can change that
// linkage, which might be a serious problem. Diagnose this as
// unsupported and ignore the typedef name. TODO: we should
// pursue this as a language defect and establish a formal rule
// for how to handle it.
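// Illustration (added for exposition): for "typedef struct { ... } S;" the
// note below suggests inserting the name after the tag keyword, yielding
// "typedef struct S { ... } S;".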
if (TagFromDeclSpec->hasLinkageBeenComputed()) {
Diag(NewTD->getLocation(), diag::err_typedef_changes_linkage);
SourceLocation tagLoc = TagFromDeclSpec->getInnerLocStart();
tagLoc = getLocForEndOfToken(tagLoc);
llvm::SmallString<40> textToInsert;
textToInsert += ' ';
textToInsert += NewTD->getIdentifier()->getName();
Diag(tagLoc, diag::note_typedef_changes_linkage)
<< FixItHint::CreateInsertion(tagLoc, textToInsert);
return;
}
// Otherwise, set this as the anon-decl typedef for the tag.
TagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
}
static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
switch (T) {
case DeclSpec::TST_class:
return 0;
case DeclSpec::TST_struct:
return 1;
case DeclSpec::TST_interface:
return 2;
case DeclSpec::TST_union:
return 3;
case DeclSpec::TST_enum:
return 4;
default:
llvm_unreachable("unexpected type specifier");
}
}
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
/// parameters to cope with template friend declarations.
Decl *
Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord) {
Decl *TagD = nullptr;
TagDecl *Tag = nullptr;
if (DS.getTypeSpecType() == DeclSpec::TST_class ||
DS.getTypeSpecType() == DeclSpec::TST_struct ||
DS.getTypeSpecType() == DeclSpec::TST_interface ||
DS.getTypeSpecType() == DeclSpec::TST_union ||
DS.getTypeSpecType() == DeclSpec::TST_enum) {
TagD = DS.getRepAsDecl();
if (!TagD) // We probably had an error
return nullptr;
// Note that the above type specs guarantee that the
// type rep is a Decl, whereas in many of the others
// it's a Type.
if (isa<TagDecl>(TagD))
Tag = cast<TagDecl>(TagD);
else if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(TagD))
Tag = CTD->getTemplatedDecl();
}
if (Tag) {
handleTagNumbering(Tag, S);
Tag->setFreeStanding();
if (Tag->isInvalidDecl())
return Tag;
}
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
// Enforce C99 6.7.3p2: "Types other than pointer types derived from object
// or incomplete types shall not be restrict-qualified."
if (TypeQuals & DeclSpec::TQ_restrict)
Diag(DS.getRestrictSpecLoc(),
diag::err_typecheck_invalid_restrict_not_pointer_noarg)
<< DS.getSourceRange();
}
if (DS.isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (DS.isConstexprSpecified()) {
// C++0x [dcl.constexpr]p1: constexpr can only be applied to declarations
// and definitions of functions and variables.
if (Tag)
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
<< GetDiagnosticTypeSpecifierID(DS.getTypeSpecType());
else
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_no_declarators);
// Don't emit warnings after this error.
return TagD;
}
DiagnoseFunctionSpecifiers(DS);
if (DS.isFriendSpecified()) {
// If we're dealing with a decl but not a TagDecl, assume that
// whatever routines created it handled the friendship aspect.
if (TagD && !Tag)
return nullptr;
return ActOnFriendTypeDecl(S, DS, TemplateParams);
}
const CXXScopeSpec &SS = DS.getTypeSpecScope();
bool IsExplicitSpecialization =
!TemplateParams.empty() && TemplateParams.back()->size() == 0;
if (Tag && SS.isNotEmpty() && !Tag->isCompleteDefinition() &&
!IsExplicitInstantiation && !IsExplicitSpecialization &&
!isa<ClassTemplatePartialSpecializationDecl>(Tag)) {
// Per C++ [dcl.type.elab]p1, a class declaration cannot have a
// nested-name-specifier unless it is an explicit instantiation
// or an explicit specialization.
//
// FIXME: We allow class template partial specializations here too, per the
// obvious intent of DR1819.
//
// Per C++ [dcl.enum]p1, an opaque-enum-declaration can't either.
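// Illustrative example (not part of the original source):
//   struct N::S;  // error: qualified name on a free-standing declaration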
Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
<< GetDiagnosticTypeSpecifierID(DS.getTypeSpecType()) << SS.getRange();
return nullptr;
}
// Track whether this decl-specifier declares anything.
bool DeclaresAnything = true;
// Handle anonymous struct definitions.
if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
if (!Record->getDeclName() && Record->isCompleteDefinition() &&
DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
if (getLangOpts().CPlusPlus ||
Record->getDeclContext()->isRecord()) {
// If CurContext is a DeclContext that can contain statements,
// RecursiveASTVisitor won't visit the decls that
// BuildAnonymousStructOrUnion() will put into CurContext.
// Also store them here so that they can be part of the
// DeclStmt that gets created in this case.
// FIXME: Also return the IndirectFieldDecls created by
// BuildAnonymousStructOrUnion, for the same reason?
if (CurContext->isFunctionOrMethod())
AnonRecord = Record;
return BuildAnonymousStructOrUnion(S, DS, AS, Record,
Context.getPrintingPolicy());
}
DeclaresAnything = false;
}
}
// C11 6.7.2.1p2:
// A struct-declaration that does not declare an anonymous structure or
// anonymous union shall contain a struct-declarator-list.
//
// This rule also existed in C89 and C99; the grammar for struct-declaration
// did not permit a struct-declaration without a struct-declarator-list.
if (!getLangOpts().CPlusPlus && CurContext->isRecord() &&
DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
// Check for Microsoft C extension: anonymous struct/union member.
// Handle 2 kinds of anonymous struct/union:
// struct STRUCT;
// union UNION;
// and
// STRUCT_TYPE; <- where STRUCT_TYPE is a typedef struct.
// UNION_TYPE; <- where UNION_TYPE is a typedef union.
if ((Tag && Tag->getDeclName()) ||
DS.getTypeSpecType() == DeclSpec::TST_typename) {
RecordDecl *Record = nullptr;
if (Tag)
Record = dyn_cast<RecordDecl>(Tag);
else if (const RecordType *RT =
DS.getRepAsType().get()->getAsStructureType())
Record = RT->getDecl();
else if (const RecordType *UT = DS.getRepAsType().get()->getAsUnionType())
Record = UT->getDecl();
if (Record && getLangOpts().MicrosoftExt) {
Diag(DS.getLocStart(), diag::ext_ms_anonymous_record)
<< Record->isUnion() << DS.getSourceRange();
return BuildMicrosoftCAnonymousStruct(S, DS, Record);
}
DeclaresAnything = false;
}
}
// Skip all the checks below if we have a type error.
if (DS.getTypeSpecType() == DeclSpec::TST_error ||
(TagD && TagD->isInvalidDecl()))
return TagD;
if (getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() != DeclSpec::SCS_typedef)
if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag))
if (Enum->enumerator_begin() == Enum->enumerator_end() &&
!Enum->getIdentifier() && !Enum->isInvalidDecl())
DeclaresAnything = false;
if (!DS.isMissingDeclaratorOk()) {
// Customize diagnostic for a typedef missing a name.
if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef)
Diag(DS.getLocStart(), diag::ext_typedef_without_a_name)
<< DS.getSourceRange();
else
DeclaresAnything = false;
}
if (DS.isModulePrivateSpecified() &&
Tag && Tag->getDeclContext()->isFunctionOrMethod())
Diag(DS.getModulePrivateSpecLoc(), diag::err_module_private_local_class)
<< Tag->getTagKind()
<< FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());
ActOnDocumentableDecl(TagD);
// C 6.7/2:
// A declaration [...] shall declare at least a declarator [...], a tag,
// or the members of an enumeration.
// C++ [dcl.dcl]p3:
// [If there are no declarators], and except for the declaration of an
// unnamed bit-field, the decl-specifier-seq shall introduce one or more
// names into the program, or shall redeclare a name introduced by a
// previous declaration.
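// Two illustrative cases (added for exposition):
//   struct S { int i; };  // declares the tag S: fine
//   int;                  // declares nothing: diagnosed below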
if (!DeclaresAnything) {
// In C, we allow this as a (popular) extension / bug. Don't bother
// producing further diagnostics for redundant qualifiers after this.
Diag(DS.getLocStart(), diag::ext_no_declarators) << DS.getSourceRange();
return TagD;
}
// C++ [dcl.stc]p1:
// If a storage-class-specifier appears in a decl-specifier-seq, [...] the
// init-declarator-list of the declaration shall not be empty.
// C++ [dcl.fct.spec]p1:
// If a cv-qualifier appears in a decl-specifier-seq, the
// init-declarator-list of the declaration shall not be empty.
//
// Spurious qualifiers here appear to be valid in C.
unsigned DiagID = diag::warn_standalone_specifier;
if (getLangOpts().CPlusPlus)
DiagID = diag::ext_standalone_specifier;
// Note that a linkage-specification sets a storage class, but
// 'extern "C" struct foo;' is actually valid and not theoretically
// useless.
if (DeclSpec::SCS SCS = DS.getStorageClassSpec()) {
if (SCS == DeclSpec::SCS_mutable)
// Since mutable is not a viable storage class specifier in C, there is
// no reason to treat it as an extension. Instead, diagnose as an error.
Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_nonmember);
else if (!DS.isExternInLinkageSpec() && SCS != DeclSpec::SCS_typedef)
Diag(DS.getStorageClassSpecLoc(), DiagID)
<< DeclSpec::getSpecifierName(SCS);
}
if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
Diag(DS.getThreadStorageClassSpecLoc(), DiagID)
<< DeclSpec::getSpecifierName(TSCS);
if (DS.getTypeQualifiers()) {
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
Diag(DS.getConstSpecLoc(), DiagID) << "const";
if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
Diag(DS.getVolatileSpecLoc(), DiagID) << "volatile";
// Restrict is covered above.
if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
Diag(DS.getAtomicSpecLoc(), DiagID) << "_Atomic";
if (DS.getTypeQualifiers() & DeclSpec::TQ_unaligned)
Diag(DS.getUnalignedSpecLoc(), DiagID) << "__unaligned";
}
// Warn about ignored type attributes, for example:
// __attribute__((aligned)) struct A;
// Attributes should be placed after tag to apply to type declaration.
if (!DS.getAttributes().empty()) {
DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
if (TypeSpecType == DeclSpec::TST_class ||
TypeSpecType == DeclSpec::TST_struct ||
TypeSpecType == DeclSpec::TST_interface ||
TypeSpecType == DeclSpec::TST_union ||
TypeSpecType == DeclSpec::TST_enum) {
for (const ParsedAttr &AL : DS.getAttributes())
Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
<< AL.getName() << GetDiagnosticTypeSpecifierID(TypeSpecType);
}
}
return TagD;
}
/// We are trying to inject an anonymous member into the given scope;
/// check if there's an existing declaration that can't be overloaded.
///
/// \return true if this is a forbidden redeclaration
static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
Scope *S,
DeclContext *Owner,
DeclarationName Name,
SourceLocation NameLoc,
bool IsUnion) {
LookupResult R(SemaRef, Name, NameLoc, Sema::LookupMemberName,
Sema::ForVisibleRedeclaration);
if (!SemaRef.LookupName(R, S)) return false;
// Pick a representative declaration.
NamedDecl *PrevDecl = R.getRepresentativeDecl()->getUnderlyingDecl();
assert(PrevDecl && "Expected a non-null Decl");
if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
return false;
SemaRef.Diag(NameLoc, diag::err_anonymous_record_member_redecl)
<< IsUnion << Name;
SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
return true;
}
/// InjectAnonymousStructOrUnionMembers - Inject the members of the
/// anonymous struct or union AnonRecord into the owning context Owner
/// and scope S. This routine will be invoked just after we realize
/// that an unnamed union or struct is actually an anonymous union or
/// struct, e.g.,
///
/// @code
/// union {
/// int i;
/// float f;
/// }; // InjectAnonymousStructOrUnionMembers called here to inject i and
/// // f into the surrounding scope.
/// @endcode
///
/// This routine is recursive, injecting the names of nested anonymous
/// structs/unions into the owning context and scope as well.
static bool
InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S, DeclContext *Owner,
RecordDecl *AnonRecord, AccessSpecifier AS,
SmallVectorImpl<NamedDecl *> &Chaining) {
bool Invalid = false;
// Look at every FieldDecl and IndirectFieldDecl with a name.
for (auto *D : AnonRecord->decls()) {
if ((isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) &&
cast<NamedDecl>(D)->getDeclName()) {
ValueDecl *VD = cast<ValueDecl>(D);
if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
VD->getLocation(),
AnonRecord->isUnion())) {
// C++ [class.union]p2:
// The names of the members of an anonymous union shall be
// distinct from the names of any other entity in the
// scope in which the anonymous union is declared.
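// Illustrative sketch (not from the original source):
//   void g() { int i; union { int i; }; }  // error: 'i' redeclared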
Invalid = true;
} else {
// C++ [class.union]p2:
// For the purpose of name lookup, after the anonymous union
// definition, the members of the anonymous union are
// considered to have been defined in the scope in which the
// anonymous union is declared.
unsigned OldChainingSize = Chaining.size();
if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
Chaining.append(IF->chain_begin(), IF->chain_end());
else
Chaining.push_back(VD);
assert(Chaining.size() >= 2);
NamedDecl **NamedChain =
new (SemaRef.Context)NamedDecl*[Chaining.size()];
for (unsigned i = 0; i < Chaining.size(); i++)
NamedChain[i] = Chaining[i];
IndirectFieldDecl *IndirectField = IndirectFieldDecl::Create(
SemaRef.Context, Owner, VD->getLocation(), VD->getIdentifier(),
VD->getType(), {NamedChain, Chaining.size()});
for (const auto *Attr : VD->attrs())
IndirectField->addAttr(Attr->clone(SemaRef.Context));
IndirectField->setAccess(AS);
IndirectField->setImplicit();
SemaRef.PushOnScopeChains(IndirectField, S);
// That includes picking up the appropriate access specifier.
if (AS != AS_none) IndirectField->setAccess(AS);
Chaining.resize(OldChainingSize);
}
}
}
return Invalid;
}
/// StorageClassSpecToVarDeclStorageClass - Maps a DeclSpec::SCS to
/// a VarDecl::StorageClass. Any error reporting is up to the caller:
/// illegal input values are mapped to SC_None.
static StorageClass
StorageClassSpecToVarDeclStorageClass(const DeclSpec &DS) {
DeclSpec::SCS StorageClassSpec = DS.getStorageClassSpec();
assert(StorageClassSpec != DeclSpec::SCS_typedef &&
"Parser allowed 'typedef' as storage class VarDecl.");
switch (StorageClassSpec) {
case DeclSpec::SCS_unspecified: return SC_None;
case DeclSpec::SCS_extern:
if (DS.isExternInLinkageSpec())
return SC_None;
return SC_Extern;
case DeclSpec::SCS_static: return SC_Static;
case DeclSpec::SCS_auto: return SC_Auto;
case DeclSpec::SCS_register: return SC_Register;
case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
// Illegal SCSs map to None: error reporting is up to the caller.
case DeclSpec::SCS_mutable: // Fall through.
case DeclSpec::SCS_typedef: return SC_None;
}
llvm_unreachable("unknown storage class specifier");
}
static SourceLocation findDefaultInitializer(const CXXRecordDecl *Record) {
assert(Record->hasInClassInitializer());
for (const auto *I : Record->decls()) {
const auto *FD = dyn_cast<FieldDecl>(I);
if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
FD = IFD->getAnonField();
if (FD && FD->hasInClassInitializer())
return FD->getLocation();
}
llvm_unreachable("couldn't find in-class initializer");
}
static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
SourceLocation DefaultInitLoc) {
if (!Parent->isUnion() || !Parent->hasInClassInitializer())
return;
S.Diag(DefaultInitLoc, diag::err_multiple_mem_union_initialization);
S.Diag(findDefaultInitializer(Parent), diag::note_previous_initializer) << 0;
}
static void checkDuplicateDefaultInit(Sema &S, CXXRecordDecl *Parent,
CXXRecordDecl *AnonUnion) {
if (!Parent->isUnion() || !Parent->hasInClassInitializer())
return;
checkDuplicateDefaultInit(S, Parent, findDefaultInitializer(AnonUnion));
}
/// BuildAnonymousStructOrUnion - Handle the declaration of an
/// anonymous structure or union. Anonymous unions are a C++ feature
/// (C++ [class.union]) and a C11 feature; anonymous structures
/// are a C11 feature and GNU C++ extension.
Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy) {
DeclContext *Owner = Record->getDeclContext();
// Diagnose whether this anonymous struct/union is an extension.
if (Record->isUnion() && !getLangOpts().CPlusPlus && !getLangOpts().C11)
Diag(Record->getLocation(), diag::ext_anonymous_union);
else if (!Record->isUnion() && getLangOpts().CPlusPlus)
Diag(Record->getLocation(), diag::ext_gnu_anonymous_struct);
else if (!Record->isUnion() && !getLangOpts().C11)
Diag(Record->getLocation(), diag::ext_c11_anonymous_struct);
// C and C++ require different kinds of checks for anonymous
// structs/unions.
bool Invalid = false;
if (getLangOpts().CPlusPlus) {
const char *PrevSpec = nullptr;
unsigned DiagID;
if (Record->isUnion()) {
// C++ [class.union]p6:
// C++17 [class.union.anon]p2:
// Anonymous unions declared in a named namespace or in the
// global namespace shall be declared static.
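// Illustrative sketch (added for exposition):
//   namespace N { union { int i; }; }         // error: must be static
//   namespace N { static union { int j; }; }  // OK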
DeclContext *OwnerScope = Owner->getRedeclContext();
if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
(OwnerScope->isTranslationUnit() ||
(OwnerScope->isNamespace() &&
!cast<NamespaceDecl>(OwnerScope)->isAnonymousNamespace()))) {
Diag(Record->getLocation(), diag::err_anonymous_union_not_static)
<< FixItHint::CreateInsertion(Record->getLocation(), "static ");
// Recover by adding 'static'.
DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
PrevSpec, DiagID, Policy);
}
// C++ [class.union]p6:
// A storage class is not allowed in a declaration of an
// anonymous union in a class scope.
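// Illustrative example (not from the original source):
//   struct S { static union { int i; }; };  // error: storage class here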
else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
isa<RecordDecl>(Owner)) {
Diag(DS.getStorageClassSpecLoc(),
diag::err_anonymous_union_with_storage_spec)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
// Recover by removing the storage specifier.
DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified,
SourceLocation(),
PrevSpec, DiagID, Context.getPrintingPolicy());
}
}
// Ignore const/volatile/restrict qualifiers.
if (DS.getTypeQualifiers()) {
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
Diag(DS.getConstSpecLoc(), diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "const"
<< FixItHint::CreateRemoval(DS.getConstSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
Diag(DS.getVolatileSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "volatile"
<< FixItHint::CreateRemoval(DS.getVolatileSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
Diag(DS.getRestrictSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "restrict"
<< FixItHint::CreateRemoval(DS.getRestrictSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
Diag(DS.getAtomicSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "_Atomic"
<< FixItHint::CreateRemoval(DS.getAtomicSpecLoc());
if (DS.getTypeQualifiers() & DeclSpec::TQ_unaligned)
Diag(DS.getUnalignedSpecLoc(),
diag::ext_anonymous_struct_union_qualified)
<< Record->isUnion() << "__unaligned"
<< FixItHint::CreateRemoval(DS.getUnalignedSpecLoc());
DS.ClearTypeQualifiers();
}
// C++ [class.union]p2:
// The member-specification of an anonymous union shall only
// define non-static data members. [Note: nested types and
// functions cannot be declared within an anonymous union. ]
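// Illustrative sketch (added for exposition):
//   union { int i; void f(); };  // error: function in an anonymous union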
for (auto *Mem : Record->decls()) {
if (auto *FD = dyn_cast<FieldDecl>(Mem)) {
// C++ [class.union]p3:
// An anonymous union shall not have private or protected
// members (clause 11).
assert(FD->getAccess() != AS_none);
if (FD->getAccess() != AS_public) {
Diag(FD->getLocation(), diag::err_anonymous_record_nonpublic_member)
<< Record->isUnion() << (FD->getAccess() == AS_protected);
Invalid = true;
}
// C++ [class.union]p1
// An object of a class with a non-trivial constructor, a non-trivial
// copy constructor, a non-trivial destructor, or a non-trivial copy
// assignment operator cannot be a member of a union, nor can an
// array of such objects.
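// Illustrative sketch (pre-C++11 semantics, added for exposition):
//   union { std::string s; };  // ill-formed: non-trivial member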
if (CheckNontrivialField(FD))
Invalid = true;
} else if (Mem->isImplicit()) {
// Any implicit members are fine.
} else if (isa<TagDecl>(Mem) && Mem->getDeclContext() != Record) {
// This is a type that showed up in an
// elaborated-type-specifier inside the anonymous struct or
// union, but which actually declares a type outside of the
// anonymous struct or union. It's okay.
} else if (auto *MemRecord = dyn_cast<RecordDecl>(Mem)) {
if (!MemRecord->isAnonymousStructOrUnion() &&
MemRecord->getDeclName()) {
// Visual C++ allows type definition in anonymous struct or union.
if (getLangOpts().MicrosoftExt)
Diag(MemRecord->getLocation(), diag::ext_anonymous_record_with_type)
<< Record->isUnion();
else {
// This is a nested type declaration.
Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
<< Record->isUnion();
Invalid = true;
}
} else {
// This is an anonymous type definition within another anonymous type.
// This is a popular extension, provided by Plan9, MSVC and GCC, but
// not part of standard C++.
Diag(MemRecord->getLocation(),
diag::ext_anonymous_record_with_anonymous_type)
<< Record->isUnion();
}
} else if (isa<AccessSpecDecl>(Mem)) {
// Any access specifier is fine.
} else if (isa<StaticAssertDecl>(Mem)) {
// In C++1z, static_assert declarations are also fine.
} else {
// We have something that isn't a non-static data
// member. Complain about it.
unsigned DK = diag::err_anonymous_record_bad_member;
if (isa<TypeDecl>(Mem))
DK = diag::err_anonymous_record_with_type;
else if (isa<FunctionDecl>(Mem))
DK = diag::err_anonymous_record_with_function;
else if (isa<VarDecl>(Mem))
DK = diag::err_anonymous_record_with_static;
// Visual C++ allows type definition in anonymous struct or union.
if (getLangOpts().MicrosoftExt &&
DK == diag::err_anonymous_record_with_type)
Diag(Mem->getLocation(), diag::ext_anonymous_record_with_type)
<< Record->isUnion();
else {
Diag(Mem->getLocation(), DK) << Record->isUnion();
Invalid = true;
}
}
}
// C++11 [class.union]p8 (DR1460):
// At most one variant member of a union may have a
// brace-or-equal-initializer.
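// Illustrative sketch (not from the original source):
//   union { int a = 0; int b = 1; };  // error: two default initializers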
if (cast<CXXRecordDecl>(Record)->hasInClassInitializer() &&
Owner->isRecord())
checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Owner),
cast<CXXRecordDecl>(Record));
}
if (!Record->isUnion() && !Owner->isRecord()) {
Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
<< getLangOpts().CPlusPlus;
Invalid = true;
}
// Mock up a declarator.
Declarator Dc(DS, DeclaratorContext::MemberContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
// Create a declaration for this anonymous struct/union.
NamedDecl *Anon = nullptr;
if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
Anon = FieldDecl::Create(Context, OwningClass,
DS.getLocStart(),
Record->getLocation(),
/*IdentifierInfo=*/nullptr,
Context.getTypeDeclType(Record),
TInfo,
/*BitWidth=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
Anon->setAccess(AS);
if (getLangOpts().CPlusPlus)
FieldCollector->Add(cast<FieldDecl>(Anon));
} else {
DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
StorageClass SC = StorageClassSpecToVarDeclStorageClass(DS);
if (SCSpec == DeclSpec::SCS_mutable) {
// mutable can only appear on non-static class members, so it's always
// an error here
Diag(Record->getLocation(), diag::err_mutable_nonmember);
Invalid = true;
SC = SC_None;
}
Anon = VarDecl::Create(Context, Owner,
DS.getLocStart(),
Record->getLocation(), /*IdentifierInfo=*/nullptr,
Context.getTypeDeclType(Record),
TInfo, SC);
// Default-initialize the implicit variable. This initialization will be
// trivial in almost all cases, except if a union member has an in-class
// initializer:
// union { int n = 0; };
ActOnUninitializedDecl(Anon);
}
Anon->setImplicit();
// Mark this as an anonymous struct/union type.
Record->setAnonymousStructOrUnion(true);
// Add the anonymous struct/union object to the current
// context. We'll be referencing this object when we refer to one of
// its members.
Owner->addDecl(Anon);
// Inject the members of the anonymous struct/union into the owning
// context and into the identifier resolver chain for name lookup
// purposes.
SmallVector<NamedDecl*, 2> Chain;
Chain.push_back(Anon);
if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS, Chain))
Invalid = true;
if (VarDecl *NewVD = dyn_cast<VarDecl>(Anon)) {
if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
Decl *ManglingContextDecl;
if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
NewVD->getDeclContext(), ManglingContextDecl)) {
Context.setManglingNumber(
NewVD, MCtx->getManglingNumber(
NewVD, getMSManglingNumber(getLangOpts(), S)));
Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
}
}
}
if (Invalid)
Anon->setInvalidDecl();
return Anon;
}
/// BuildMicrosoftCAnonymousStruct - Handle the declaration of a
/// Microsoft C anonymous structure.
/// Ref: http://msdn.microsoft.com/en-us/library/z2cx9y4f.aspx
/// Example:
///
/// struct A { int a; };
/// struct B { struct A; int b; };
///
/// void foo() {
/// B var;
/// var.a = 3;
/// }
///
Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record) {
assert(Record && "expected a record!");
// Mock up a declarator.
Declarator Dc(DS, DeclaratorContext::TypeNameContext);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct");
auto *ParentDecl = cast<RecordDecl>(CurContext);
QualType RecTy = Context.getTypeDeclType(Record);
// Create a declaration for this anonymous struct.
NamedDecl *Anon = FieldDecl::Create(Context,
ParentDecl,
DS.getLocStart(),
DS.getLocStart(),
/*IdentifierInfo=*/nullptr,
RecTy,
TInfo,
/*BitWidth=*/nullptr, /*Mutable=*/false,
/*InitStyle=*/ICIS_NoInit);
Anon->setImplicit();
// Add the anonymous struct object to the current context.
CurContext->addDecl(Anon);
// Inject the members of the anonymous struct into the current
// context and into the identifier resolver chain for name lookup
// purposes.
SmallVector<NamedDecl*, 2> Chain;
Chain.push_back(Anon);
RecordDecl *RecordDef = Record->getDefinition();
if (RequireCompleteType(Anon->getLocation(), RecTy,
diag::err_field_incomplete) ||
InjectAnonymousStructOrUnionMembers(*this, S, CurContext, RecordDef,
AS_none, Chain)) {
Anon->setInvalidDecl();
ParentDecl->setInvalidDecl();
}
return Anon;
}
/// GetNameForDeclarator - Determine the full declaration name for the
/// given Declarator.
DeclarationNameInfo Sema::GetNameForDeclarator(Declarator &D) {
return GetNameFromUnqualifiedId(D.getName());
}
/// Retrieves the declaration name from a parsed unqualified-id.
DeclarationNameInfo
Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
DeclarationNameInfo NameInfo;
NameInfo.setLoc(Name.StartLocation);
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_ImplicitSelfParam:
case UnqualifiedIdKind::IK_Identifier:
NameInfo.setName(Name.Identifier);
NameInfo.setLoc(Name.StartLocation);
return NameInfo;
case UnqualifiedIdKind::IK_DeductionGuideName: {
// C++ [temp.deduct.guide]p3:
// The simple-template-id shall name a class template specialization.
// The template-name shall be the same identifier as the template-name
// of the simple-template-id.
// These together intend to imply that the template-name shall name a
// class template.
// FIXME: template<typename T> struct X {};
// template<typename T> using Y = X<T>;
// Y(int) -> Y<int>;
// satisfies these rules but does not name a class template.
TemplateName TN = Name.TemplateName.get().get();
auto *Template = TN.getAsTemplateDecl();
if (!Template || !isa<ClassTemplateDecl>(Template)) {
Diag(Name.StartLocation,
diag::err_deduction_guide_name_not_class_template)
<< (int)getTemplateNameKindForDiagnostics(TN) << TN;
if (Template)
Diag(Template->getLocation(), diag::note_template_decl_here);
return DeclarationNameInfo();
}
NameInfo.setName(
Context.DeclarationNames.getCXXDeductionGuideName(Template));
NameInfo.setLoc(Name.StartLocation);
return NameInfo;
}
case UnqualifiedIdKind::IK_OperatorFunctionId:
NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator));
NameInfo.setLoc(Name.StartLocation);
NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc
= Name.OperatorFunctionId.SymbolLocations[0];
NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
= Name.EndLocation.getRawEncoding();
return NameInfo;
case UnqualifiedIdKind::IK_LiteralOperatorId:
NameInfo.setName(Context.DeclarationNames.getCXXLiteralOperatorName(
Name.Identifier));
NameInfo.setLoc(Name.StartLocation);
NameInfo.setCXXLiteralOperatorNameLoc(Name.EndLocation);
return NameInfo;
case UnqualifiedIdKind::IK_ConversionFunctionId: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.ConversionFunctionId, &TInfo);
if (Ty.isNull())
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXConversionFunctionName(
Context.getCanonicalType(Ty)));
NameInfo.setLoc(Name.StartLocation);
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
case UnqualifiedIdKind::IK_ConstructorName: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.ConstructorName, &TInfo);
if (Ty.isNull())
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(Ty)));
NameInfo.setLoc(Name.StartLocation);
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
case UnqualifiedIdKind::IK_ConstructorTemplateId: {
// In well-formed code, we can only have a constructor
// template-id that refers to the current context, so go there
// to find the actual type being constructed.
CXXRecordDecl *CurClass = dyn_cast<CXXRecordDecl>(CurContext);
if (!CurClass || CurClass->getIdentifier() != Name.TemplateId->Name)
return DeclarationNameInfo();
// Determine the type of the class being constructed.
QualType CurClassType = Context.getTypeDeclType(CurClass);
// FIXME: Check two things: that the template-id names the same type as
// CurClassType, and that the template-id does not occur when the name
// was qualified.
NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(CurClassType)));
NameInfo.setLoc(Name.StartLocation);
// FIXME: should we retrieve TypeSourceInfo?
NameInfo.setNamedTypeInfo(nullptr);
return NameInfo;
}
case UnqualifiedIdKind::IK_DestructorName: {
TypeSourceInfo *TInfo;
QualType Ty = GetTypeFromParser(Name.DestructorName, &TInfo);
if (Ty.isNull())
return DeclarationNameInfo();
NameInfo.setName(Context.DeclarationNames.getCXXDestructorName(
Context.getCanonicalType(Ty)));
NameInfo.setLoc(Name.StartLocation);
NameInfo.setNamedTypeInfo(TInfo);
return NameInfo;
}
case UnqualifiedIdKind::IK_TemplateId: {
TemplateName TName = Name.TemplateId->Template.get();
SourceLocation TNameLoc = Name.TemplateId->TemplateNameLoc;
return Context.getNameForTemplate(TName, TNameLoc);
}
} // switch (Name.getKind())
llvm_unreachable("Unknown name kind");
}
static QualType getCoreType(QualType Ty) {
do {
if (Ty->isPointerType() || Ty->isReferenceType())
Ty = Ty->getPointeeType();
else if (Ty->isArrayType())
Ty = Ty->castAsArrayTypeUnsafe()->getElementType();
else
return Ty.withoutLocalFastQualifiers();
} while (true);
}
/// hasSimilarParameters - Determine whether the C++ functions Declaration
/// and Definition have "nearly" matching parameters. This heuristic is
/// used to improve diagnostics in the case where an out-of-line function
/// definition doesn't match any declaration within the class or namespace.
/// Also sets Params to the list of indices to the parameters that differ
/// between the declaration and the definition. If hasSimilarParameters
/// returns true and Params is empty, then all of the parameters match.
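/// A hedged illustration (added for exposition):
/// @code
/// struct T {};
/// struct X { void f(const T &); };
/// void X::f(T) {}  // "similar": both parameters share core type T
/// @endcode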
static bool hasSimilarParameters(ASTContext &Context,
FunctionDecl *Declaration,
FunctionDecl *Definition,
SmallVectorImpl<unsigned> &Params) {
Params.clear();
if (Declaration->param_size() != Definition->param_size())
return false;
for (unsigned Idx = 0; Idx < Declaration->param_size(); ++Idx) {
QualType DeclParamTy = Declaration->getParamDecl(Idx)->getType();
QualType DefParamTy = Definition->getParamDecl(Idx)->getType();
// The parameter types are identical
if (Context.hasSameType(DefParamTy, DeclParamTy))
continue;
QualType DeclParamBaseTy = getCoreType(DeclParamTy);
QualType DefParamBaseTy = getCoreType(DefParamTy);
const IdentifierInfo *DeclTyName = DeclParamBaseTy.getBaseTypeIdentifier();
const IdentifierInfo *DefTyName = DefParamBaseTy.getBaseTypeIdentifier();
if (Context.hasSameUnqualifiedType(DeclParamBaseTy, DefParamBaseTy) ||
(DeclTyName && DeclTyName == DefTyName))
Params.push_back(Idx);
else // The two parameters aren't even close
return false;
}
return true;
}
/// RebuildDeclaratorInCurrentInstantiation - Checks whether the given
/// declarator needs to be rebuilt in the current instantiation and, if
/// so, rebuilds it in place; returns true only if rebuilding fails.
/// Any bits of declarator which appear before the name are valid for
/// consideration here. That's specifically the type in the decl spec
/// and the base type in any member-pointer chunks.
static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
DeclarationName Name) {
// The types we specifically need to rebuild are:
// - typenames, typeofs, and decltypes
// - types which will become injected class names
// Of course, we also need to rebuild any type referencing such a
// type. It's safest to just say "dependent", but we call out a
// few cases here.
DeclSpec &DS = D.getMutableDeclSpec();
switch (DS.getTypeSpecType()) {
case DeclSpec::TST_typename:
case DeclSpec::TST_typeofType:
case DeclSpec::TST_underlyingType:
case DeclSpec::TST_atomic: {
// Grab the type from the parser.
TypeSourceInfo *TSI = nullptr;
QualType T = S.GetTypeFromParser(DS.getRepAsType(), &TSI);
if (T.isNull() || !T->isDependentType()) break;
// Make sure there's a type source info. This isn't really much
// of a waste; most dependent types should have type source info
// attached already.
if (!TSI)
TSI = S.Context.getTrivialTypeSourceInfo(T, DS.getTypeSpecTypeLoc());
// Rebuild the type in the current instantiation.
TSI = S.RebuildTypeInCurrentInstantiation(TSI, D.getIdentifierLoc(), Name);
if (!TSI) return true;
// Store the new type back in the decl spec.
ParsedType LocType = S.CreateParsedType(TSI->getType(), TSI);
DS.UpdateTypeRep(LocType);
break;
}
case DeclSpec::TST_decltype:
case DeclSpec::TST_typeofExpr: {
Expr *E = DS.getRepAsExpr();
ExprResult Result = S.RebuildExprInCurrentInstantiation(E);
if (Result.isInvalid()) return true;
DS.UpdateExprRep(Result.get());
break;
}
default:
// Nothing to do for these decl specs.
break;
}
// It doesn't matter what order we do this in.
for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
DeclaratorChunk &Chunk = D.getTypeObject(I);
// The only type information in the declarator which can come
// before the declaration name is the base type of a member
// pointer.
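// Illustration (added for exposition): in "int C<T>::*pm;" the base type
// "C<T>" precedes the name "pm" and may need rebuilding.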
if (Chunk.Kind != DeclaratorChunk::MemberPointer)
continue;
// Rebuild the scope specifier in-place.
CXXScopeSpec &SS = Chunk.Mem.Scope();
if (S.RebuildNestedNameSpecifierInCurrentInstantiation(SS))
return true;
}
return false;
}
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
D.setFunctionDefinitionKind(FDK_Declaration);
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
Dcl && Dcl->getDeclContext()->isFileContext())
Dcl->setTopLevelDeclInObjCContainer();
if (getLangOpts().OpenCL)
setCurrentOpenCLExtensionForDecl(Dcl);
return Dcl;
}
/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
/// If T is the name of a class, then each of the following shall have a
/// name different from T:
/// - every static data member of class T;
/// - every member function of class T
/// - every member of class T that is itself a type;
/// \returns true if the declaration name violates these rules.
bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
DeclarationNameInfo NameInfo) {
DeclarationName Name = NameInfo.getName();
CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC);
while (Record && Record->isAnonymousStructOrUnion())
Record = dyn_cast<CXXRecordDecl>(Record->getParent());
if (Record && Record->getIdentifier() && Record->getDeclName() == Name) {
Diag(NameInfo.getLoc(), diag::err_member_name_of_class) << Name;
return true;
}
return false;
}
/// Diagnose a declaration whose declarator-id has the given
/// nested-name-specifier.
///
/// \param SS The nested-name-specifier of the declarator-id.
///
/// \param DC The declaration context to which the nested-name-specifier
/// resolves.
///
/// \param Name The name of the entity being declared.
///
/// \param Loc The location of the name of the entity being declared.
///
/// \param IsTemplateId Whether the name is a (simple-)template-id, and thus
/// we're declaring an explicit / partial specialization / instantiation.
///
/// \returns true if we cannot safely recover from this error, false otherwise.
bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name,
SourceLocation Loc, bool IsTemplateId) {
DeclContext *Cur = CurContext;
while (isa<LinkageSpecDecl>(Cur) || isa<CapturedDecl>(Cur))
Cur = Cur->getParent();
// If the user provided a superfluous scope specifier that refers back to the
// class in which the entity is already declared, diagnose and ignore it.
//
// class X {
// void X::f();
// };
//
// Note, it was once ill-formed to give redundant qualification in all
// contexts, but that rule was removed by DR482.
if (Cur->Equals(DC)) {
if (Cur->isRecord()) {
Diag(Loc, LangOpts.MicrosoftExt ? diag::warn_member_extra_qualification
: diag::err_member_extra_qualification)
<< Name << FixItHint::CreateRemoval(SS.getRange());
SS.clear();
} else {
Diag(Loc, diag::warn_namespace_member_extra_qualification) << Name;
}
return false;
}
// Check whether the qualifying scope encloses the scope of the original
// declaration. For a template-id, we perform the checks in
// CheckTemplateSpecializationScope.
if (!Cur->Encloses(DC) && !IsTemplateId) {
if (Cur->isRecord())
Diag(Loc, diag::err_member_qualification)
<< Name << SS.getRange();
else if (isa<TranslationUnitDecl>(DC))
Diag(Loc, diag::err_invalid_declarator_global_scope)
<< Name << SS.getRange();
else if (isa<FunctionDecl>(Cur))
Diag(Loc, diag::err_invalid_declarator_in_function)
<< Name << SS.getRange();
else if (isa<BlockDecl>(Cur))
Diag(Loc, diag::err_invalid_declarator_in_block)
<< Name << SS.getRange();
else
Diag(Loc, diag::err_invalid_declarator_scope)
<< Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC) << SS.getRange();
return true;
}
if (Cur->isRecord()) {
// Cannot qualify members within a class.
Diag(Loc, diag::err_member_qualification)
<< Name << SS.getRange();
SS.clear();
// C++ constructors and destructors with incorrect scopes can break
// our AST invariants by having the wrong underlying types. If
// that's the case, then drop this declaration entirely.
if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
Name.getNameKind() == DeclarationName::CXXDestructorName) &&
!Context.hasSameType(Name.getCXXNameType(),
Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
return true;
return false;
}
// C++11 [dcl.meaning]p1:
// [...] "The nested-name-specifier of the qualified declarator-id shall
// not begin with a decltype-specifier"
NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
while (SpecLoc.getPrefix())
SpecLoc = SpecLoc.getPrefix();
if (dyn_cast_or_null<DecltypeType>(
SpecLoc.getNestedNameSpecifier()->getAsType()))
Diag(Loc, diag::err_decltype_in_declarator)
<< SpecLoc.getTypeLoc().getSourceRange();
return false;
}
NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists) {
// TODO: consider using NameInfo for diagnostic.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
// All of these full declarators require an identifier. If it doesn't have
// one, the ParsedFreeStandingDeclSpec action should be used.
if (D.isDecompositionDeclarator()) {
return ActOnDecompositionDeclarator(S, D, TemplateParamLists);
} else if (!Name) {
if (!D.isInvalidType()) // Reject this if we think it is valid.
Diag(D.getDeclSpec().getLocStart(),
diag::err_declarator_need_ident)
<< D.getDeclSpec().getSourceRange() << D.getSourceRange();
return nullptr;
} else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
return nullptr;
// The scope passed in may not be a decl scope. Zip up the scope tree until
// we find one that is.
while ((S->getFlags() & Scope::DeclScope) == 0 ||
(S->getFlags() & Scope::TemplateParamScope) != 0)
S = S->getParent();
DeclContext *DC = CurContext;
if (D.getCXXScopeSpec().isInvalid())
D.setInvalidType();
else if (D.getCXXScopeSpec().isSet()) {
if (DiagnoseUnexpandedParameterPack(D.getCXXScopeSpec(),
UPPC_DeclarationQualifier))
return nullptr;
bool EnteringContext = !D.getDeclSpec().isFriendSpecified();
DC = computeDeclContext(D.getCXXScopeSpec(), EnteringContext);
if (!DC || isa<EnumDecl>(DC)) {
// If we could not compute the declaration context, it's because the
// declaration context is dependent but does not refer to a class,
// class template, or class template partial specialization. Complain
// and return early, to avoid the coming semantic disaster.
Diag(D.getIdentifierLoc(),
diag::err_template_qualified_declarator_no_match)
<< D.getCXXScopeSpec().getScopeRep()
<< D.getCXXScopeSpec().getRange();
return nullptr;
}
bool IsDependentContext = DC->isDependentContext();
if (!IsDependentContext &&
RequireCompleteDeclContext(D.getCXXScopeSpec(), DC))
return nullptr;
// If a class is incomplete, do not parse entities inside it.
if (isa<CXXRecordDecl>(DC) && !cast<CXXRecordDecl>(DC)->hasDefinition()) {
Diag(D.getIdentifierLoc(),
diag::err_member_def_undefined_record)
<< Name << DC << D.getCXXScopeSpec().getRange();
return nullptr;
}
if (!D.getDeclSpec().isFriendSpecified()) {
if (diagnoseQualifiedDeclaration(
D.getCXXScopeSpec(), DC, Name, D.getIdentifierLoc(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId)) {
if (DC->isRecord())
return nullptr;
D.setInvalidType();
}
}
// Check whether we need to rebuild the type of the given
// declaration in the current instantiation.
if (EnteringContext && IsDependentContext &&
TemplateParamLists.size() != 0) {
ContextRAII SavedContext(*this, DC);
if (RebuildDeclaratorInCurrentInstantiation(*this, D, Name))
D.setInvalidType();
}
}
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType R = TInfo->getType();
if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
UPPC_DeclarationType))
D.setInvalidType();
LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
forRedeclarationInCurContext());
// See if this is a redefinition of a variable in the same scope.
if (!D.getCXXScopeSpec().isSet()) {
bool IsLinkageLookup = false;
bool CreateBuiltins = false;
// If the declaration we're planning to build will be a function
// or object with linkage, then look for another declaration with
// linkage (C99 6.2.2p4-5 and C++ [basic.link]p6).
//
// If the declaration we're planning to build will be declared with
// external linkage in the translation unit, create any builtin with
// the same name.
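// A hedged illustration (not from the original source):
//   void g() { extern int n; }  // block-scope extern: linkage lookup applies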
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
/* Do nothing*/;
else if (CurContext->isFunctionOrMethod() &&
(D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern ||
R->isFunctionType())) {
IsLinkageLookup = true;
CreateBuiltins =
CurContext->getEnclosingNamespaceContext()->isTranslationUnit();
} else if (CurContext->getRedeclContext()->isTranslationUnit() &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)
CreateBuiltins = true;
if (IsLinkageLookup) {
Previous.clear(LookupRedeclarationWithLinkage);
Previous.setRedeclarationKind(ForExternalRedeclaration);
}
LookupName(Previous, S, CreateBuiltins);
} else { // Something like "int foo::x;"
LookupQualifiedName(Previous, DC);
// C++ [dcl.meaning]p1:
// When the declarator-id is qualified, the declaration shall refer to a
// previously declared member of the class or namespace to which the
// qualifier refers (or, in the case of a namespace, of an element of the
// inline namespace set of that namespace (7.3.1)) or to a specialization
// thereof; [...]
//
// Note that we already checked the context above, and that we do not have
// enough information to make sure that Previous contains the declaration
// we want to match. For example, given:
//
// class X {
// void f();
// void f(float);
// };
//
// void X::f(int) { } // ill-formed
//
// In this case, Previous will point to the overload set
// containing the two f's declared in X, but neither of them
// matches.
// C++ [dcl.meaning]p1:
// [...] the member shall not merely have been introduced by a
// using-declaration in the scope of the class or namespace nominated by
// the nested-name-specifier of the declarator-id.
RemoveUsingDecls(Previous);
}
if (Previous.isSingleResult() &&
Previous.getFoundDecl()->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
if (!D.isInvalidType())
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
Previous.getFoundDecl());
// Just pretend that we didn't see the previous declaration.
Previous.clear();
}
if (!R->isFunctionType() && DiagnoseClassNameShadow(DC, NameInfo))
// Forget that the previous declaration is the injected-class-name.
Previous.clear();
// In C++, the previous declaration we find might be a tag type
// (class or enum). In this case, the new declaration will hide the
// tag type. Note that this applies to functions, function templates, and
// variables, but not to typedefs (C++ [dcl.typedef]p4) or variable templates.
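// For example (an illustrative sketch; the names are placeholders):
//   struct stat { int st_size; };
//   extern int stat;   // hides the tag in ordinary lookup;
//                      // 'struct stat' still names the class type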
if (Previous.isSingleTagDecl() &&
D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
(TemplateParamLists.size() == 0 || R->isFunctionType()))
Previous.clear();
// Check that there are no default arguments other than in the parameters
// of a function declaration (C++ only).
if (getLangOpts().CPlusPlus)
CheckExtraCXXDefaultArguments(D);
NamedDecl *New;
bool AddToScope = true;
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
if (TemplateParamLists.size()) {
Diag(D.getIdentifierLoc(), diag::err_template_typedef);
return nullptr;
}
New = ActOnTypedefDeclarator(S, D, DC, TInfo, Previous);
} else if (R->isFunctionType()) {
New = ActOnFunctionDeclarator(S, D, DC, TInfo, Previous,
TemplateParamLists,
AddToScope);
} else {
New = ActOnVariableDeclarator(S, D, DC, TInfo, Previous, TemplateParamLists,
AddToScope);
}
if (!New)
return nullptr;
// If this has an identifier and is not a function template specialization,
// add it to the scope stack.
if (New->getDeclName() && AddToScope) {
// Only make a locally-scoped extern declaration visible if it is the first
// declaration of this entity. Qualified lookup for such an entity should
// only find this declaration if there is no visible declaration of it.
bool AddToContext = !D.isRedeclaration() || !New->isLocalExternDecl();
PushOnScopeChains(New, S, AddToContext);
if (!AddToContext)
CurContext->addHiddenDecl(New);
}
if (isInOpenMPDeclareTargetContext())
checkDeclIsAllowedInOpenMPTarget(nullptr, New);
return New;
}
/// Helper method to turn variable array types into constant array
/// types in certain situations which would otherwise be errors (for
/// GCC compatibility).
static QualType TryToFixInvalidVariablyModifiedType(QualType T,
ASTContext &Context,
bool &SizeIsNegative,
llvm::APSInt &Oversized) {
// This method tries to turn a variable array into a constant
// array even when the size isn't an ICE. This is necessary
// for compatibility with code that depends on gcc's buggy
// constant expression folding, like struct {char x[(int)(char*)2];}
SizeIsNegative = false;
Oversized = 0;
if (T->isDependentType())
return QualType();
QualifierCollector Qs;
const Type *Ty = Qs.strip(T);
if (const PointerType* PTy = dyn_cast<PointerType>(Ty)) {
QualType Pointee = PTy->getPointeeType();
QualType FixedType =
TryToFixInvalidVariablyModifiedType(Pointee, Context, SizeIsNegative,
Oversized);
if (FixedType.isNull()) return FixedType;
FixedType = Context.getPointerType(FixedType);
return Qs.apply(Context, FixedType);
}
if (const ParenType* PTy = dyn_cast<ParenType>(Ty)) {
QualType Inner = PTy->getInnerType();
QualType FixedType =
TryToFixInvalidVariablyModifiedType(Inner, Context, SizeIsNegative,
Oversized);
if (FixedType.isNull()) return FixedType;
FixedType = Context.getParenType(FixedType);
return Qs.apply(Context, FixedType);
}
const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T);
if (!VLATy)
return QualType();
// FIXME: We should probably handle this case
if (VLATy->getElementType()->isVariablyModifiedType())
return QualType();
llvm::APSInt Res;
if (!VLATy->getSizeExpr() ||
!VLATy->getSizeExpr()->EvaluateAsInt(Res, Context))
return QualType();
// Check whether the array size is negative.
if (Res.isSigned() && Res.isNegative()) {
SizeIsNegative = true;
return QualType();
}
// Check whether the array is too large to be addressed.
unsigned ActiveSizeBits
= ConstantArrayType::getNumAddressingBits(Context, VLATy->getElementType(),
Res);
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
Oversized = Res;
return QualType();
}
return Context.getConstantArrayType(VLATy->getElementType(),
Res, ArrayType::Normal, 0);
}
static void
FixInvalidVariablyModifiedTypeLoc(TypeLoc SrcTL, TypeLoc DstTL) {
SrcTL = SrcTL.getUnqualifiedLoc();
DstTL = DstTL.getUnqualifiedLoc();
if (PointerTypeLoc SrcPTL = SrcTL.getAs<PointerTypeLoc>()) {
PointerTypeLoc DstPTL = DstTL.castAs<PointerTypeLoc>();
FixInvalidVariablyModifiedTypeLoc(SrcPTL.getPointeeLoc(),
DstPTL.getPointeeLoc());
DstPTL.setStarLoc(SrcPTL.getStarLoc());
return;
}
if (ParenTypeLoc SrcPTL = SrcTL.getAs<ParenTypeLoc>()) {
ParenTypeLoc DstPTL = DstTL.castAs<ParenTypeLoc>();
FixInvalidVariablyModifiedTypeLoc(SrcPTL.getInnerLoc(),
DstPTL.getInnerLoc());
DstPTL.setLParenLoc(SrcPTL.getLParenLoc());
DstPTL.setRParenLoc(SrcPTL.getRParenLoc());
return;
}
ArrayTypeLoc SrcATL = SrcTL.castAs<ArrayTypeLoc>();
ArrayTypeLoc DstATL = DstTL.castAs<ArrayTypeLoc>();
TypeLoc SrcElemTL = SrcATL.getElementLoc();
TypeLoc DstElemTL = DstATL.getElementLoc();
DstElemTL.initializeFullCopy(SrcElemTL);
DstATL.setLBracketLoc(SrcATL.getLBracketLoc());
DstATL.setSizeExpr(SrcATL.getSizeExpr());
DstATL.setRBracketLoc(SrcATL.getRBracketLoc());
}
/// Helper method to turn variable array types into constant array
/// types in certain situations which would otherwise be errors (for
/// GCC compatibility).
static TypeSourceInfo*
TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
ASTContext &Context,
bool &SizeIsNegative,
llvm::APSInt &Oversized) {
QualType FixedTy
= TryToFixInvalidVariablyModifiedType(TInfo->getType(), Context,
SizeIsNegative, Oversized);
if (FixedTy.isNull())
return nullptr;
TypeSourceInfo *FixedTInfo = Context.getTrivialTypeSourceInfo(FixedTy);
FixInvalidVariablyModifiedTypeLoc(TInfo->getTypeLoc(),
FixedTInfo->getTypeLoc());
return FixedTInfo;
}
/// Register the given locally-scoped extern "C" declaration so
/// that it can be found later for redeclarations. We include any extern "C"
/// declaration that is not visible in the translation unit here, not just
/// function-scope declarations.
void
Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S) {
if (!getLangOpts().CPlusPlus &&
ND->getLexicalDeclContext()->getRedeclContext()->isTranslationUnit())
// Don't need to track declarations in the TU in C.
return;
// Note that we have a locally-scoped external with this name.
Context.getExternCContextDecl()->makeDeclVisibleInContext(ND);
}
NamedDecl *Sema::findLocallyScopedExternCDecl(DeclarationName Name) {
// FIXME: We can have multiple results via __attribute__((overloadable)).
auto Result = Context.getExternCContextDecl()->lookup(Name);
return Result.empty() ? nullptr : *Result.begin();
}
/// Diagnose function specifiers on a declaration of an identifier that
/// does not identify a function.
void Sema::DiagnoseFunctionSpecifiers(const DeclSpec &DS) {
// FIXME: We should probably indicate the identifier in question to avoid
// confusion for constructs like "virtual int a(), b;"
if (DS.isVirtualSpecified())
Diag(DS.getVirtualSpecLoc(),
diag::err_virtual_non_function);
if (DS.isExplicitSpecified())
Diag(DS.getExplicitSpecLoc(),
diag::err_explicit_non_function);
if (DS.isNoreturnSpecified())
Diag(DS.getNoreturnSpecLoc(),
diag::err_noreturn_non_function);
}
NamedDecl*
Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo, LookupResult &Previous) {
// Typedef declarators cannot be qualified (C++ [dcl.meaning]p1).
if (D.getCXXScopeSpec().isSet()) {
Diag(D.getIdentifierLoc(), diag::err_qualified_typedef_declarator)
<< D.getCXXScopeSpec().getRange();
D.setInvalidType();
// Pretend we didn't see the scope specifier.
DC = CurContext;
Previous.clear();
}
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (D.getDeclSpec().isInlineSpecified())
Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (D.getDeclSpec().isConstexprSpecified())
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 1;
if (D.getName().Kind != UnqualifiedIdKind::IK_Identifier) {
if (D.getName().Kind == UnqualifiedIdKind::IK_DeductionGuideName)
Diag(D.getName().StartLocation,
diag::err_deduction_guide_invalid_specifier)
<< "typedef";
else
Diag(D.getName().StartLocation, diag::err_typedef_not_identifier)
<< D.getName().getSourceRange();
return nullptr;
}
TypedefDecl *NewTD = ParseTypedefDecl(S, D, TInfo->getType(), TInfo);
if (!NewTD) return nullptr;
// Handle attributes prior to checking for duplicates in MergeTypedefNameDecl
ProcessDeclAttributes(S, NewTD, D);
CheckTypedefForVariablyModifiedType(S, NewTD);
bool Redeclaration = D.isRedeclaration();
NamedDecl *ND = ActOnTypedefNameDecl(S, DC, NewTD, Previous, Redeclaration);
D.setRedeclaration(Redeclaration);
return ND;
}
void
Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
// C99 6.7.7p2: If a typedef name specifies a variably modified type
// then it shall have block scope.
// Note that variably modified types must be fixed before merging the decl so
// that redeclarations will match.
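// For example (sketch):
//   void f(int n) {
//     typedef int VLA[n];   // OK: block scope
//   }
// At file scope such a typedef is ill-formed; for GCC compatibility the
// code below first tries to fold the size to a constant before diagnosing.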
TypeSourceInfo *TInfo = NewTD->getTypeSourceInfo();
QualType T = TInfo->getType();
if (T->isVariablyModifiedType()) {
setFunctionHasBranchProtectedScope();
if (S->getFnParent() == nullptr) {
bool SizeIsNegative;
llvm::APSInt Oversized;
TypeSourceInfo *FixedTInfo =
TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
SizeIsNegative,
Oversized);
if (FixedTInfo) {
Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size);
NewTD->setTypeSourceInfo(FixedTInfo);
} else {
if (SizeIsNegative)
Diag(NewTD->getLocation(), diag::err_typecheck_negative_array_size);
else if (T->isVariableArrayType())
Diag(NewTD->getLocation(), diag::err_vla_decl_in_file_scope);
else if (Oversized.getBoolValue())
Diag(NewTD->getLocation(), diag::err_array_too_large)
<< Oversized.toString(10);
else
Diag(NewTD->getLocation(), diag::err_vm_decl_in_file_scope);
NewTD->setInvalidDecl();
}
}
}
}
/// ActOnTypedefNameDecl - Perform semantic checking for a declaration which
/// declares a typedef-name, either using the 'typedef' type specifier or via
/// a C++0x [dcl.typedef]p2 alias-declaration: 'using T = A;'.
NamedDecl*
Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
LookupResult &Previous, bool &Redeclaration) {
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = getShadowedDeclaration(NewTD, Previous);
// Merge the decl with the existing one if appropriate. If the decl is
// in an outer scope, it isn't the same thing.
FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/false,
/*AllowInlineNamespace*/false);
filterNonConflictingPreviousTypedefDecls(*this, NewTD, Previous);
if (!Previous.empty()) {
Redeclaration = true;
MergeTypedefNameDecl(S, NewTD, Previous);
}
if (ShadowedDecl && !Redeclaration)
CheckShadow(NewTD, ShadowedDecl, Previous);
// If this is the C FILE type, notify the AST context.
if (IdentifierInfo *II = NewTD->getIdentifier())
if (!NewTD->isInvalidDecl() &&
NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (II->isStr("FILE"))
Context.setFILEDecl(NewTD);
else if (II->isStr("jmp_buf"))
Context.setjmp_bufDecl(NewTD);
else if (II->isStr("sigjmp_buf"))
Context.setsigjmp_bufDecl(NewTD);
else if (II->isStr("ucontext_t"))
Context.setucontext_tDecl(NewTD);
}
return NewTD;
}
/// Determines whether the given declaration is an out-of-scope
/// previous declaration.
///
/// This routine should be invoked when name lookup has found a
/// previous declaration (PrevDecl) that is not in the scope where a
/// new declaration by the same name is being introduced. If the new
/// declaration occurs in a local scope, previous declarations with
/// linkage may still be considered previous declarations (C99
/// 6.2.2p4-5, C++ [basic.link]p6).
///
/// \param PrevDecl the previous declaration found by name
/// lookup
///
/// \param DC the context in which the new declaration is being
/// declared.
///
/// \returns true if PrevDecl is an out-of-scope previous declaration
/// for a new declaration with the same name.
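///
/// For example (sketch):
/// \code
///   extern int n;      // #1, at namespace scope
///   void f() {
///     extern int n;    // block scope; refers to #1 and receives its linkage
///   }
/// \endcode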
static bool
isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl, DeclContext *DC,
ASTContext &Context) {
if (!PrevDecl)
return false;
if (!PrevDecl->hasLinkage())
return false;
if (Context.getLangOpts().CPlusPlus) {
// C++ [basic.link]p6:
// If there is a visible declaration of an entity with linkage
// having the same name and type, ignoring entities declared
// outside the innermost enclosing namespace scope, the block
// scope declaration declares that same entity and receives the
// linkage of the previous declaration.
DeclContext *OuterContext = DC->getRedeclContext();
if (!OuterContext->isFunctionOrMethod())
// This rule only applies to block-scope declarations.
return false;
DeclContext *PrevOuterContext = PrevDecl->getDeclContext();
if (PrevOuterContext->isRecord())
// We found a member function: ignore it.
return false;
// Find the innermost enclosing namespace for the new and
// previous declarations.
OuterContext = OuterContext->getEnclosingNamespaceContext();
PrevOuterContext = PrevOuterContext->getEnclosingNamespaceContext();
// The previous declaration is in a different namespace, so it
// isn't the same function.
if (!OuterContext->Equals(PrevOuterContext))
return false;
}
return true;
}
static void SetNestedNameSpecifier(DeclaratorDecl *DD, Declarator &D) {
CXXScopeSpec &SS = D.getCXXScopeSpec();
if (!SS.isSet()) return;
DD->setQualifierInfo(SS.getWithLocInContext(DD->getASTContext()));
}
bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
QualType type = decl->getType();
Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
if (lifetime == Qualifiers::OCL_Autoreleasing) {
// Various kinds of declaration aren't allowed to be __autoreleasing.
unsigned kind = -1U;
if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
if (var->hasAttr<BlocksAttr>())
kind = 0; // __block
else if (!var->hasLocalStorage())
kind = 1; // global
} else if (isa<ObjCIvarDecl>(decl)) {
kind = 3; // ivar
} else if (isa<FieldDecl>(decl)) {
kind = 2; // field
}
if (kind != -1U) {
Diag(decl->getLocation(), diag::err_arc_autoreleasing_var)
<< kind;
}
} else if (lifetime == Qualifiers::OCL_None) {
// Try to infer lifetime.
if (!type->isObjCLifetimeType())
return false;
lifetime = type->getObjCARCImplicitLifetime();
type = Context.getLifetimeQualifiedType(type, lifetime);
decl->setType(type);
}
if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
// Thread-local variables cannot have lifetime.
if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone &&
var->getTLSKind()) {
Diag(var->getLocation(), diag::err_arc_thread_ownership)
<< var->getType();
return true;
}
}
return false;
}
static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
// Ensure that an auto decl is deduced; otherwise the checks below might
// cache the wrong linkage.
assert(S.ParsingInitForAutoVars.count(&ND) == 0);
// 'weak' only applies to declarations with external linkage.
if (WeakAttr *Attr = ND.getAttr<WeakAttr>()) {
if (!ND.isExternallyVisible()) {
S.Diag(Attr->getLocation(), diag::err_attribute_weak_static);
ND.dropAttr<WeakAttr>();
}
}
if (WeakRefAttr *Attr = ND.getAttr<WeakRefAttr>()) {
if (ND.isExternallyVisible()) {
S.Diag(Attr->getLocation(), diag::err_attribute_weakref_not_static);
ND.dropAttr<WeakRefAttr>();
ND.dropAttr<AliasAttr>();
}
}
if (auto *VD = dyn_cast<VarDecl>(&ND)) {
if (VD->hasInit()) {
if (const auto *Attr = VD->getAttr<AliasAttr>()) {
assert(VD->isThisDeclarationADefinition() &&
!VD->isExternallyVisible() && "Broken AliasAttr handled late!");
S.Diag(Attr->getLocation(), diag::err_alias_is_definition) << VD << 0;
VD->dropAttr<AliasAttr>();
}
}
}
// 'selectany' only applies to externally visible variable declarations.
// It does not apply to functions.
if (SelectAnyAttr *Attr = ND.getAttr<SelectAnyAttr>()) {
if (isa<FunctionDecl>(ND) || !ND.isExternallyVisible()) {
S.Diag(Attr->getLocation(),
diag::err_attribute_selectany_non_extern_data);
ND.dropAttr<SelectAnyAttr>();
}
}
if (const InheritableAttr *Attr = getDLLAttr(&ND)) {
// dll attributes require external linkage. Static locals may have external
// linkage but still cannot be explicitly imported or exported.
auto *VD = dyn_cast<VarDecl>(&ND);
if (!ND.isExternallyVisible() || (VD && VD->isStaticLocal())) {
S.Diag(ND.getLocation(), diag::err_attribute_dll_not_extern)
<< &ND << Attr;
ND.setInvalidDecl();
}
}
// Virtual functions cannot be marked as 'notail'.
if (auto *Attr = ND.getAttr<NotTailCalledAttr>())
if (auto *MD = dyn_cast<CXXMethodDecl>(&ND))
if (MD->isVirtual()) {
S.Diag(ND.getLocation(),
diag::err_invalid_attribute_on_virtual_function)
<< Attr;
ND.dropAttr<NotTailCalledAttr>();
}
// Check the attributes on the function type, if any.
if (const auto *FD = dyn_cast<FunctionDecl>(&ND)) {
// Don't declare this variable in the second operand of the for-statement;
// GCC miscompiles that by ending its lifetime before evaluating the
// third operand. See gcc.gnu.org/PR86769.
AttributedTypeLoc ATL;
for (TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc();
(ATL = TL.getAsAdjusted<AttributedTypeLoc>());
TL = ATL.getModifiedLoc()) {
// The [[lifetimebound]] attribute can be applied to the implicit object
// parameter of a non-static member function (other than a ctor or dtor)
// by applying it to the function type.
if (ATL.getAttrKind() == AttributedType::attr_lifetimebound) {
const auto *MD = dyn_cast<CXXMethodDecl>(FD);
if (!MD || MD->isStatic()) {
S.Diag(ATL.getAttrNameLoc(), diag::err_lifetimebound_no_object_param)
<< !MD << ATL.getLocalSourceRange();
} else if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)) {
S.Diag(ATL.getAttrNameLoc(), diag::err_lifetimebound_ctor_dtor)
<< isa<CXXDestructorDecl>(MD) << ATL.getLocalSourceRange();
}
}
}
}
}
static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
NamedDecl *NewDecl,
bool IsSpecialization,
bool IsDefinition) {
if (OldDecl->isInvalidDecl() || NewDecl->isInvalidDecl())
return;
bool IsTemplate = false;
if (TemplateDecl *OldTD = dyn_cast<TemplateDecl>(OldDecl)) {
OldDecl = OldTD->getTemplatedDecl();
IsTemplate = true;
if (!IsSpecialization)
IsDefinition = false;
}
if (TemplateDecl *NewTD = dyn_cast<TemplateDecl>(NewDecl)) {
NewDecl = NewTD->getTemplatedDecl();
IsTemplate = true;
}
if (!OldDecl || !NewDecl)
return;
const DLLImportAttr *OldImportAttr = OldDecl->getAttr<DLLImportAttr>();
const DLLExportAttr *OldExportAttr = OldDecl->getAttr<DLLExportAttr>();
const DLLImportAttr *NewImportAttr = NewDecl->getAttr<DLLImportAttr>();
const DLLExportAttr *NewExportAttr = NewDecl->getAttr<DLLExportAttr>();
// dllimport and dllexport are inheritable attributes so we have to exclude
// inherited attribute instances.
bool HasNewAttr = (NewImportAttr && !NewImportAttr->isInherited()) ||
(NewExportAttr && !NewExportAttr->isInherited());
// A redeclaration is not allowed to add a dllimport or dllexport attribute,
// the only exception being explicit specializations.
// Implicitly generated declarations are also excluded for now because there
// is no other way to switch these to use dllimport or dllexport.
bool AddsAttr = !(OldImportAttr || OldExportAttr) && HasNewAttr;
if (AddsAttr && !IsSpecialization && !OldDecl->isImplicit()) {
// Allow with a warning for free functions and global variables.
bool JustWarn = false;
if (!OldDecl->isCXXClassMember()) {
auto *VD = dyn_cast<VarDecl>(OldDecl);
if (VD && !VD->getDescribedVarTemplate())
JustWarn = true;
auto *FD = dyn_cast<FunctionDecl>(OldDecl);
if (FD && FD->getTemplatedKind() == FunctionDecl::TK_NonTemplate)
JustWarn = true;
}
// We cannot change a declaration that's been used because IR has already
// been emitted. Dllimported functions will still work though (modulo
// address equality) as they can use the thunk.
if (OldDecl->isUsed())
if (!isa<FunctionDecl>(OldDecl) || !NewImportAttr)
JustWarn = false;
unsigned DiagID = JustWarn ? diag::warn_attribute_dll_redeclaration
: diag::err_attribute_dll_redeclaration;
S.Diag(NewDecl->getLocation(), DiagID)
<< NewDecl
<< (NewImportAttr ? (const Attr *)NewImportAttr : NewExportAttr);
S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
if (!JustWarn) {
NewDecl->setInvalidDecl();
return;
}
}
// A redeclaration is not allowed to drop a dllimport attribute, the only
// exceptions being inline function definitions (except for function
// templates), local extern declarations, qualified friend declarations or
// special MSVC extension: in the last case, the declaration is treated as if
// it were marked dllexport.
bool IsInline = false, IsStaticDataMember = false, IsQualifiedFriend = false;
bool IsMicrosoft = S.Context.getTargetInfo().getCXXABI().isMicrosoft();
if (const auto *VD = dyn_cast<VarDecl>(NewDecl)) {
// Ignore static data because out-of-line definitions are diagnosed
// separately.
IsStaticDataMember = VD->isStaticDataMember();
IsDefinition = VD->isThisDeclarationADefinition(S.Context) !=
VarDecl::DeclarationOnly;
} else if (const auto *FD = dyn_cast<FunctionDecl>(NewDecl)) {
IsInline = FD->isInlined();
IsQualifiedFriend = FD->getQualifier() &&
FD->getFriendObjectKind() == Decl::FOK_Declared;
}
if (OldImportAttr && !HasNewAttr &&
(!IsInline || (IsMicrosoft && IsTemplate)) && !IsStaticDataMember &&
!NewDecl->isLocalExternDecl() && !IsQualifiedFriend) {
if (IsMicrosoft && IsDefinition) {
S.Diag(NewDecl->getLocation(),
diag::warn_redeclaration_without_import_attribute)
<< NewDecl;
S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
NewDecl->dropAttr<DLLImportAttr>();
NewDecl->addAttr(::new (S.Context) DLLExportAttr(
NewImportAttr->getRange(), S.Context,
NewImportAttr->getSpellingListIndex()));
} else {
S.Diag(NewDecl->getLocation(),
diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
<< NewDecl << OldImportAttr;
S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
S.Diag(OldImportAttr->getLocation(), diag::note_previous_attribute);
OldDecl->dropAttr<DLLImportAttr>();
NewDecl->dropAttr<DLLImportAttr>();
}
} else if (IsInline && OldImportAttr && !IsMicrosoft) {
// In MinGW, seeing a function declared inline drops the dllimport
// attribute.
OldDecl->dropAttr<DLLImportAttr>();
NewDecl->dropAttr<DLLImportAttr>();
S.Diag(NewDecl->getLocation(),
diag::warn_dllimport_dropped_from_inline_function)
<< NewDecl << OldImportAttr;
}
// A specialization of a class template member function is processed here
// since it's a redeclaration. If the parent class is dllexport, the
// specialization inherits that attribute. This doesn't happen automatically
// since the parent class isn't instantiated until later.
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDecl)) {
if (MD->getTemplatedKind() == FunctionDecl::TK_MemberSpecialization &&
!NewImportAttr && !NewExportAttr) {
if (const DLLExportAttr *ParentExportAttr =
MD->getParent()->getAttr<DLLExportAttr>()) {
DLLExportAttr *NewAttr = ParentExportAttr->clone(S.Context);
NewAttr->setInherited(true);
NewDecl->addAttr(NewAttr);
}
}
}
}
/// Given that we are within the definition of the given function,
/// will that definition behave like C99's 'inline', where the
/// definition is discarded except for optimization purposes?
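///
/// For example, in C99 (sketch; the name is illustrative):
/// \code
///   inline int twice(int x) { return 2 * x; }  // inline definition only;
///   // an external definition must exist elsewhere in the program
/// \endcode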
static bool isFunctionDefinitionDiscarded(Sema &S, FunctionDecl *FD) {
// Try to avoid calling GetGVALinkageForFunction.
// All cases of this require the 'inline' keyword.
if (!FD->isInlined()) return false;
// This is only possible in C++ with the gnu_inline attribute.
if (S.getLangOpts().CPlusPlus && !FD->hasAttr<GNUInlineAttr>())
return false;
// Okay, go ahead and call the relatively-more-expensive function.
return S.Context.GetGVALinkageForFunction(FD) == GVA_AvailableExternally;
}
/// Determine whether a variable is extern "C" prior to attaching
/// an initializer. We can't just call isExternC() here, because that
/// will also compute and cache whether the declaration is externally
/// visible, which might change when we attach the initializer.
///
/// This can only be used if the declaration is known to not be a
/// redeclaration of an internal linkage declaration.
///
/// For instance:
///
/// auto x = []{};
///
/// Attaching the initializer here makes this declaration not externally
/// visible, because its type has internal linkage.
///
/// FIXME: This is a hack.
template<typename T>
static bool isIncompleteDeclExternC(Sema &S, const T *D) {
if (S.getLangOpts().CPlusPlus) {
// In C++, the overloadable attribute negates the effects of extern "C".
if (!D->isInExternCContext() || D->template hasAttr<OverloadableAttr>())
return false;
// So do CUDA's host/device attributes.
if (S.getLangOpts().CUDA && (D->template hasAttr<CUDADeviceAttr>() ||
D->template hasAttr<CUDAHostAttr>()))
return false;
}
return D->isExternC();
}
static bool shouldConsiderLinkage(const VarDecl *VD) {
const DeclContext *DC = VD->getDeclContext()->getRedeclContext();
if (DC->isFunctionOrMethod() || isa<OMPDeclareReductionDecl>(DC))
return VD->hasExternalStorage();
if (DC->isFileContext())
return true;
if (DC->isRecord())
return false;
llvm_unreachable("Unexpected context");
}
static bool shouldConsiderLinkage(const FunctionDecl *FD) {
const DeclContext *DC = FD->getDeclContext()->getRedeclContext();
if (DC->isFileContext() || DC->isFunctionOrMethod() ||
isa<OMPDeclareReductionDecl>(DC))
return true;
if (DC->isRecord())
return false;
llvm_unreachable("Unexpected context");
}
static bool hasParsedAttr(Scope *S, const Declarator &PD,
ParsedAttr::Kind Kind) {
// Check decl attributes on the DeclSpec.
if (PD.getDeclSpec().getAttributes().hasAttribute(Kind))
return true;
// Walk the declarator structure, checking decl attributes that appeared in a
// type position but apply to the decl itself.
for (unsigned I = 0, E = PD.getNumTypeObjects(); I != E; ++I) {
if (PD.getTypeObject(I).getAttrs().hasAttribute(Kind))
return true;
}
// Finally, check attributes on the decl itself.
return PD.getAttributes().hasAttribute(Kind);
}
/// Adjust the \c DeclContext for a function or variable that might be a
/// function-local external declaration.
bool Sema::adjustContextForLocalExternDecl(DeclContext *&DC) {
if (!DC->isFunctionOrMethod())
return false;
// If this is a local extern function or variable declared within a function
// template, don't add it into the enclosing namespace scope until it is
// instantiated; it might have a dependent type right now.
if (DC->isDependentContext())
return true;
// C++11 [basic.link]p7:
// When a block scope declaration of an entity with linkage is not found to
// refer to some other declaration, then that entity is a member of the
// innermost enclosing namespace.
//
// Per C++11 [namespace.def]p6, the innermost enclosing namespace is a
// semantically-enclosing namespace, not a lexically-enclosing one.
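// For example (sketch):
//   namespace N {
//     void g() {
//       extern void h();  // no prior declaration found: 'h' becomes a
//     }                   // member of namespace N
//   }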
while (!DC->isFileContext() && !isa<LinkageSpecDecl>(DC))
DC = DC->getParent();
return true;
}
/// Returns true if given declaration has external C language linkage.
static bool isDeclExternC(const Decl *D) {
if (const auto *FD = dyn_cast<FunctionDecl>(D))
return FD->isExternC();
if (const auto *VD = dyn_cast<VarDecl>(D))
return VD->isExternC();
llvm_unreachable("Unknown type of decl!");
}
NamedDecl *Sema::ActOnVariableDeclarator(
Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope, ArrayRef<BindingDecl *> Bindings) {
QualType R = TInfo->getType();
DeclarationName Name = GetNameForDeclarator(D).getName();
IdentifierInfo *II = Name.getAsIdentifierInfo();
if (D.isDecompositionDeclarator()) {
// Take the name of the first declarator as our name for diagnostic
// purposes.
auto &Decomp = D.getDecompositionDeclarator();
if (!Decomp.bindings().empty()) {
II = Decomp.bindings()[0].Name;
Name = II;
}
} else if (!II) {
Diag(D.getIdentifierLoc(), diag::err_bad_variable_name) << Name;
return nullptr;
}
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.9.b - Image type can only be used as a function argument.
// OpenCL v2.0 s6.13.16.1 - Pipe type can only be used as a function
// argument.
if (R->isImageType() || R->isPipeType()) {
Diag(D.getIdentifierLoc(),
diag::err_opencl_type_can_only_be_used_as_function_parameter)
<< R;
D.setInvalidType();
return nullptr;
}
// OpenCL v1.2 s6.9.r:
// The event type cannot be used to declare a program scope variable.
// OpenCL v2.0 s6.9.q:
// The clk_event_t and reserve_id_t types cannot be declared in program scope.
if (S->getParent() == nullptr) {
if (R->isReserveIDT() || R->isClkEventT() || R->isEventT()) {
Diag(D.getIdentifierLoc(),
diag::err_invalid_type_for_program_scope_var) << R;
D.setInvalidType();
return nullptr;
}
}
// OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
QualType NR = R;
while (NR->isPointerType()) {
if (NR->isFunctionPointerType()) {
Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer);
D.setInvalidType();
break;
}
NR = NR->getPointeeType();
}
if (!getOpenCLOptions().isEnabled("cl_khr_fp16")) {
// OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and
// half array type (unless the cl_khr_fp16 extension is enabled).
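// For example (sketch):
//   half h;     // rejected unless cl_khr_fp16 is enabled
//   half a[2];  // likewise rejected (the base element type is half)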
if (Context.getBaseElementType(R)->isHalfType()) {
Diag(D.getIdentifierLoc(), diag::err_opencl_half_declaration) << R;
D.setInvalidType();
}
}
if (R->isSamplerT()) {
// OpenCL v1.2 s6.9.b p4:
// The sampler type cannot be used with the __local and __global address
// space qualifiers.
if (R.getAddressSpace() == LangAS::opencl_local ||
R.getAddressSpace() == LangAS::opencl_global) {
Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
}
// OpenCL v1.2 s6.12.14.1:
// A global sampler must be declared with either the constant address
// space qualifier or with the const qualifier.
if (DC->isTranslationUnit() &&
!(R.getAddressSpace() == LangAS::opencl_constant ||
R.isConstQualified())) {
Diag(D.getIdentifierLoc(), diag::err_opencl_nonconst_global_sampler);
D.setInvalidType();
}
}
// OpenCL v1.2 s6.9.r:
// The event type cannot be used with the __local, __constant and __global
// address space qualifiers.
if (R->isEventT()) {
if (R.getAddressSpace() != LangAS::opencl_private) {
Diag(D.getLocStart(), diag::err_event_t_addr_space_qual);
D.setInvalidType();
}
}
// OpenCL C++ 1.0 s2.9: the thread_local storage qualifier is not
// supported. OpenCL C does not support thread_local either, so we
// also reject all other thread storage class specifiers.
DeclSpec::TSCS TSC = D.getDeclSpec().getThreadStorageClassSpec();
if (TSC != TSCS_unspecified) {
bool IsCXX = getLangOpts().OpenCLCPlusPlus;
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_opencl_unknown_type_specifier)
<< IsCXX << getLangOpts().getOpenCLVersionTuple().getAsString()
<< DeclSpec::getSpecifierName(TSC) << 1;
D.setInvalidType();
return nullptr;
}
}
DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
// dllimport globals without explicit storage class are treated as extern. We
// have to change the storage class this early to get the right DeclContext.
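// For example (sketch):
//   __declspec(dllimport) int x;  // treated as if declared 'extern int x;'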
if (SC == SC_None && !DC->isRecord() &&
hasParsedAttr(S, D, ParsedAttr::AT_DLLImport) &&
!hasParsedAttr(S, D, ParsedAttr::AT_DLLExport))
SC = SC_Extern;
DeclContext *OriginalDC = DC;
bool IsLocalExternDecl = SC == SC_Extern &&
adjustContextForLocalExternDecl(DC);
if (SCSpec == DeclSpec::SCS_mutable) {
// mutable can only appear on non-static class members, so it's always
// an error here
Diag(D.getIdentifierLoc(), diag::err_mutable_nonmember);
D.setInvalidType();
SC = SC_None;
}
if (getLangOpts().CPlusPlus11 && SCSpec == DeclSpec::SCS_register &&
!D.getAsmLabel() && !getSourceManager().isInSystemMacro(
D.getDeclSpec().getStorageClassSpecLoc())) {
// In C++11, the 'register' storage class specifier is deprecated.
// Suppress the warning in system macros, it's used in macros in some
// popular C system headers, such as in glibc's htonl() macro.
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
getLangOpts().CPlusPlus17 ? diag::ext_register_storage_class
: diag::warn_deprecated_register)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (!DC->isRecord() && S->getFnParent() == nullptr) {
// C99 6.9p2: The storage-class specifiers auto and register shall not
// appear in the declaration specifiers in an external declaration.
// Global Register+Asm is a GNU extension we support.
if (SC == SC_Auto || (SC == SC_Register && !D.getAsmLabel())) {
Diag(D.getIdentifierLoc(), diag::err_typecheck_sclass_fscope);
D.setInvalidType();
}
}
bool IsMemberSpecialization = false;
bool IsVariableTemplateSpecialization = false;
bool IsPartialSpecialization = false;
bool IsVariableTemplate = false;
VarDecl *NewVD = nullptr;
VarTemplateDecl *NewTemplate = nullptr;
TemplateParameterList *TemplateParams = nullptr;
if (!getLangOpts().CPlusPlus) {
NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
D.getIdentifierLoc(), II,
R, TInfo, SC);
if (R->getContainedDeducedType())
ParsingInitForAutoVars.insert(NewVD);
if (D.isInvalidType())
NewVD->setInvalidDecl();
} else {
bool Invalid = false;
if (DC->isRecord() && !CurContext->isRecord()) {
// This is an out-of-line definition of a static data member.
switch (SC) {
case SC_None:
break;
case SC_Static:
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_static_out_of_line)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
break;
case SC_Auto:
case SC_Register:
case SC_Extern:
// [dcl.stc] p2: The auto or register specifiers shall be applied only
// to names of variables declared in a block or to function parameters.
// [dcl.stc] p6: The extern specifier cannot be used in the declaration
// of class members
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_storage_class_for_static_member)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
break;
case SC_PrivateExtern:
llvm_unreachable("C storage class in c++!");
}
}
if (SC == SC_Static && CurContext->isRecord()) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
if (RD->isLocalClass())
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_local_class)
<< Name << RD->getDeclName();
// C++98 [class.union]p1: If a union contains a static data member,
// the program is ill-formed. C++11 drops this restriction.
if (RD->isUnion())
Diag(D.getIdentifierLoc(),
getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_static_data_member_in_union
: diag::ext_static_data_member_in_union) << Name;
// We conservatively disallow static data members in anonymous structs.
else if (!RD->getDeclName())
Diag(D.getIdentifierLoc(),
diag::err_static_data_member_not_allowed_in_anon_struct)
<< Name << RD->isUnion();
}
}
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
TemplateParams = MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
: nullptr,
TemplateParamLists,
/*never a friend*/ false, IsMemberSpecialization, Invalid);
if (TemplateParams) {
if (!TemplateParams->size() &&
D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
// There is an extraneous 'template<>' for this variable. Complain
// about it, but allow the declaration of the variable.
Diag(TemplateParams->getTemplateLoc(),
diag::err_template_variable_noparams)
<< II
<< SourceRange(TemplateParams->getTemplateLoc(),
TemplateParams->getRAngleLoc());
TemplateParams = nullptr;
} else {
if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
// This is an explicit specialization or a partial specialization.
// FIXME: Check that we can declare a specialization here.
IsVariableTemplateSpecialization = true;
IsPartialSpecialization = TemplateParams->size() > 0;
} else { // if (TemplateParams->size() > 0)
// This is a template declaration.
IsVariableTemplate = true;
// Check that we can declare a template here.
if (CheckTemplateDeclScope(S, TemplateParams))
return nullptr;
// Only C++1y supports variable templates (N3651).
Diag(D.getIdentifierLoc(),
getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_variable_template
: diag::ext_variable_template);
}
}
} else {
assert((Invalid ||
D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) &&
"should have a 'template<>' for this decl");
}
if (IsVariableTemplateSpecialization) {
SourceLocation TemplateKWLoc =
TemplateParamLists.size() > 0
? TemplateParamLists[0]->getTemplateLoc()
: SourceLocation();
DeclResult Res = ActOnVarTemplateSpecialization(
S, D, TInfo, TemplateKWLoc, TemplateParams, SC,
IsPartialSpecialization);
if (Res.isInvalid())
return nullptr;
NewVD = cast<VarDecl>(Res.get());
AddToScope = false;
} else if (D.isDecompositionDeclarator()) {
NewVD = DecompositionDecl::Create(Context, DC, D.getLocStart(),
D.getIdentifierLoc(), R, TInfo, SC,
Bindings);
} else
NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
D.getIdentifierLoc(), II, R, TInfo, SC);
// If this is supposed to be a variable template, create it as such.
if (IsVariableTemplate) {
NewTemplate =
VarTemplateDecl::Create(Context, DC, D.getIdentifierLoc(), Name,
TemplateParams, NewVD);
NewVD->setDescribedVarTemplate(NewTemplate);
}
// If this decl has an auto type in need of deduction, make a note of the
// Decl so we can diagnose uses of it in its own initializer.
if (R->getContainedDeducedType())
ParsingInitForAutoVars.insert(NewVD);
if (D.isInvalidType() || Invalid) {
NewVD->setInvalidDecl();
if (NewTemplate)
NewTemplate->setInvalidDecl();
}
SetNestedNameSpecifier(NewVD, D);
// If we have any template parameter lists that don't directly belong to
// the variable (matching the scope specifier), store them.
unsigned VDTemplateParamLists = TemplateParams ? 1 : 0;
if (TemplateParamLists.size() > VDTemplateParamLists)
NewVD->setTemplateParameterListsInfo(
Context, TemplateParamLists.drop_back(VDTemplateParamLists));
if (D.getDeclSpec().isConstexprSpecified()) {
NewVD->setConstexpr(true);
// C++1z [dcl.spec.constexpr]p1:
// A static data member declared with the constexpr specifier is
// implicitly an inline variable.
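// For example (sketch):
//   struct S { static constexpr int N = 3; };  // 'N' is implicitly inline
//                                              // in C++17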
if (NewVD->isStaticDataMember() && getLangOpts().CPlusPlus17)
NewVD->setImplicitlyInline();
}
}
if (D.getDeclSpec().isInlineSpecified()) {
if (!getLangOpts().CPlusPlus) {
Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_non_function)
<< 0;
} else if (CurContext->isFunctionOrMethod()) {
// 'inline' is not allowed on block scope variable declaration.
Diag(D.getDeclSpec().getInlineSpecLoc(),
diag::err_inline_declaration_block_scope) << Name
<< FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
} else {
Diag(D.getDeclSpec().getInlineSpecLoc(),
getLangOpts().CPlusPlus17 ? diag::warn_cxx14_compat_inline_variable
: diag::ext_inline_variable);
NewVD->setInlineSpecified();
}
}
// Set the lexical context. If the declarator has a C++ scope specifier, the
// lexical context will be different from the semantic context.
NewVD->setLexicalDeclContext(CurContext);
if (NewTemplate)
NewTemplate->setLexicalDeclContext(CurContext);
if (IsLocalExternDecl) {
if (D.isDecompositionDeclarator())
for (auto *B : Bindings)
B->setLocalExternDecl();
else
NewVD->setLocalExternDecl();
}
bool EmitTLSUnsupportedError = false;
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) {
// C++11 [dcl.stc]p4:
// When thread_local is applied to a variable of block scope the
// storage-class-specifier static is implied if it does not appear
// explicitly.
// Core issue: 'static' is not implied if the variable is declared
// 'extern'.
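// For example (sketch):
//   void f() {
//     thread_local int a;         // OK: 'static' is implied
//     extern thread_local int b;  // 'static' is not implied here
//   }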
if (NewVD->hasLocalStorage() &&
(SCSpec != DeclSpec::SCS_unspecified ||
TSCS != DeclSpec::TSCS_thread_local ||
!DC->isFunctionOrMethod()))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_non_global)
<< DeclSpec::getSpecifierName(TSCS);
else if (!Context.getTargetInfo().isTLSSupported()) {
if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
// Postpone error emission until we've collected attributes required to
// figure out whether it's a host or device variable and whether the
// error should be ignored.
EmitTLSUnsupportedError = true;
// We still need to mark the variable as TLS so it shows up in AST with
// proper storage class for other tools to use even if we're not going
// to emit any code for it.
NewVD->setTSCSpec(TSCS);
} else
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
} else
NewVD->setTSCSpec(TSCS);
}
// C99 6.7.4p3
// An inline definition of a function with external linkage shall
// not contain a definition of a modifiable object with static or
// thread storage duration...
// We only apply this when the function is required to be defined
// elsewhere, i.e. when the function is not 'extern inline'. Note
// that a local variable with thread storage duration still has to
// be marked 'static'. Also note that it's possible to get these
// semantics in C++ using __attribute__((gnu_inline)).
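// For example, in C99 (sketch; the name is illustrative):
//   inline void bump(void) {
//     static int calls;  // warned: modifiable static-duration object in
//     ++calls;           // an inline definition
//   }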
if (SC == SC_Static && S->getFnParent() != nullptr &&
!NewVD->getType().isConstQualified()) {
FunctionDecl *CurFD = getCurFunctionDecl();
if (CurFD && isFunctionDefinitionDiscarded(*this, CurFD)) {
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::warn_static_local_in_extern_inline);
MaybeSuggestAddingStaticToDecl(CurFD);
}
}
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (IsVariableTemplateSpecialization)
Diag(NewVD->getLocation(), diag::err_module_private_specialization)
<< (IsPartialSpecialization ? 1 : 0)
<< FixItHint::CreateRemoval(
D.getDeclSpec().getModulePrivateSpecLoc());
else if (IsMemberSpecialization)
Diag(NewVD->getLocation(), diag::err_module_private_specialization)
<< 2
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
else if (NewVD->hasLocalStorage())
Diag(NewVD->getLocation(), diag::err_module_private_local)
<< 0 << NewVD->getDeclName()
<< SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
else {
NewVD->setModulePrivate();
if (NewTemplate)
NewTemplate->setModulePrivate();
for (auto *B : Bindings)
B->setModulePrivate();
}
}
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice) {
if (EmitTLSUnsupportedError &&
((getLangOpts().CUDA && DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) ||
(getLangOpts().OpenMPIsDevice &&
NewVD->hasAttr<OMPDeclareTargetDeclAttr>())))
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_thread_unsupported);
// CUDA B.2.5: "__shared__ and __constant__ variables have implied static
// storage [duration]."
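// For example (sketch):
//   __global__ void kern() {
//     __shared__ int buf[64];  // static storage duration is implied
//   }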
if (SC == SC_None && S->getFnParent() != nullptr &&
(NewVD->hasAttr<CUDASharedAttr>() ||
NewVD->hasAttr<CUDAConstantAttr>())) {
NewVD->setStorageClass(SC_Static);
}
}
// Ensure that dllimport globals without explicit storage class are treated as
// extern. The storage class is set above using parsed attributes. Now we can
// check the VarDecl itself.
assert(!NewVD->hasAttr<DLLImportAttr>() ||
NewVD->getAttr<DLLImportAttr>()->isInherited() ||
NewVD->isStaticDataMember() || NewVD->getStorageClass() != SC_None);
// In auto-retain/release, infer strong retention for variables of
// retainable type.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
NewVD->setInvalidDecl();
// Handle GNU asm-label extension (encoded as an attribute).
if (Expr *E = (Expr*)D.getAsmLabel()) {
// The parser guarantees this is a string.
StringLiteral *SE = cast<StringLiteral>(E);
StringRef Label = SE->getString();
if (S->getFnParent() != nullptr) {
switch (SC) {
case SC_None:
case SC_Auto:
Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
break;
case SC_Register:
// Local Named register
if (!Context.getTargetInfo().isValidGCCRegisterName(Label) &&
DeclAttrsMatchCUDAMode(getLangOpts(), getCurFunctionDecl()))
Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
break;
case SC_Static:
case SC_Extern:
case SC_PrivateExtern:
break;
}
} else if (SC == SC_Register) {
// Global Named register
if (DeclAttrsMatchCUDAMode(getLangOpts(), NewVD)) {
const auto &TI = Context.getTargetInfo();
bool HasSizeMismatch;
if (!TI.isValidGCCRegisterName(Label))
Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
else if (!TI.validateGlobalRegisterVariable(Label,
Context.getTypeSize(R),
HasSizeMismatch))
Diag(E->getExprLoc(), diag::err_asm_invalid_global_var_reg) << Label;
else if (HasSizeMismatch)
Diag(E->getExprLoc(), diag::err_asm_register_size_mismatch) << Label;
}
if (!R->isIntegralType(Context) && !R->isPointerType()) {
Diag(D.getLocStart(), diag::err_asm_bad_register_type);
NewVD->setInvalidDecl(true);
}
}
NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
Context, Label, 0));
} else if (!ExtnameUndeclaredIdentifiers.empty()) {
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
if (I != ExtnameUndeclaredIdentifiers.end()) {
if (isDeclExternC(NewVD)) {
NewVD->addAttr(I->second);
ExtnameUndeclaredIdentifiers.erase(I);
} else
Diag(NewVD->getLocation(), diag::warn_redefine_extname_not_applied)
<< /*Variable*/1 << NewVD;
}
}
// Find the shadowed declaration before filtering for scope.
NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
? getShadowedDeclaration(NewVD, Previous)
: nullptr;
// Don't consider existing declarations that are in a different
// scope and are out-of-semantic-context declarations (if the new
// declaration has linkage).
FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewVD),
D.getCXXScopeSpec().isNotEmpty() ||
IsMemberSpecialization ||
IsVariableTemplateSpecialization);
// Check whether the previous declaration is in the same block scope. This
// affects whether we merge types with it, per C++11 [dcl.array]p3.
if (getLangOpts().CPlusPlus &&
NewVD->isLocalVarDecl() && NewVD->hasExternalStorage())
NewVD->setPreviousDeclInSameBlockScope(
Previous.isSingleResult() && !Previous.isShadowed() &&
isDeclInScope(Previous.getFoundDecl(), OriginalDC, S, false));
if (!getLangOpts().CPlusPlus) {
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
} else {
// If this is an explicit specialization of a static data member, check it.
if (IsMemberSpecialization && !NewVD->isInvalidDecl() &&
CheckMemberSpecialization(NewVD, Previous))
NewVD->setInvalidDecl();
// Merge the decl with the existing one if appropriate.
if (!Previous.empty()) {
if (Previous.isSingleResult() &&
isa<FieldDecl>(Previous.getFoundDecl()) &&
D.getCXXScopeSpec().isSet()) {
// The user tried to define a non-static data member
// out-of-line (C++ [dcl.meaning]p1).
Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line)
<< D.getCXXScopeSpec().getRange();
Previous.clear();
NewVD->setInvalidDecl();
}
} else if (D.getCXXScopeSpec().isSet()) {
// No previous declaration in the qualifying scope.
Diag(D.getIdentifierLoc(), diag::err_no_member)
<< Name << computeDeclContext(D.getCXXScopeSpec(), true)
<< D.getCXXScopeSpec().getRange();
NewVD->setInvalidDecl();
}
if (!IsVariableTemplateSpecialization)
D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
if (NewTemplate) {
VarTemplateDecl *PrevVarTemplate =
NewVD->getPreviousDecl()
? NewVD->getPreviousDecl()->getDescribedVarTemplate()
: nullptr;
// Check the template parameter list of this declaration, possibly
// merging in the template parameter list from the previous variable
// template declaration.
if (CheckTemplateParameterList(
TemplateParams,
PrevVarTemplate ? PrevVarTemplate->getTemplateParameters()
: nullptr,
(D.getCXXScopeSpec().isSet() && DC && DC->isRecord() &&
DC->isDependentContext())
? TPC_ClassTemplateMember
: TPC_VarTemplate))
NewVD->setInvalidDecl();
// If we are providing an explicit specialization of a static variable
// template, make a note of that.
if (PrevVarTemplate &&
PrevVarTemplate->getInstantiatedFromMemberTemplate())
PrevVarTemplate->setMemberSpecialization();
}
}
// Diagnose shadowed variables iff this isn't a redeclaration.
if (ShadowedDecl && !D.isRedeclaration())
CheckShadow(NewVD, ShadowedDecl, Previous);
ProcessPragmaWeak(S, NewVD);
// If this is the first declaration of an extern C variable, update
// the map of such variables.
if (NewVD->isFirstDecl() && !NewVD->isInvalidDecl() &&
isIncompleteDeclExternC(*this, NewVD))
RegisterLocallyScopedExternCDecl(NewVD, S);
if (getLangOpts().CPlusPlus && NewVD->isStaticLocal()) {
Decl *ManglingContextDecl;
if (MangleNumberingContext *MCtx = getCurrentMangleNumberContext(
NewVD->getDeclContext(), ManglingContextDecl)) {
Context.setManglingNumber(
NewVD, MCtx->getManglingNumber(
NewVD, getMSManglingNumber(getLangOpts(), S)));
Context.setStaticLocalNumber(NewVD, MCtx->getStaticLocalNumber(NewVD));
}
}
// Special handling of variable named 'main'.
if (Name.getAsIdentifierInfo() && Name.getAsIdentifierInfo()->isStr("main") &&
NewVD->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
!getLangOpts().Freestanding && !NewVD->getDescribedVarTemplate()) {
// C++ [basic.start.main]p3
// A program that declares a variable main at global scope is ill-formed.
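// For example:
//   int main;  // ill-formed at global scope in C++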
if (getLangOpts().CPlusPlus)
Diag(D.getLocStart(), diag::err_main_global_variable);
// In C, an external-linkage variable named main results in undefined
// behavior.
else if (NewVD->hasExternalFormalLinkage())
Diag(D.getLocStart(), diag::warn_main_redefined);
}
if (D.isRedeclaration() && !Previous.empty()) {
NamedDecl *Prev = Previous.getRepresentativeDecl();
checkDLLAttributeRedeclaration(*this, Prev, NewVD, IsMemberSpecialization,
D.isFunctionDefinition());
}
if (NewTemplate) {
if (NewVD->isInvalidDecl())
NewTemplate->setInvalidDecl();
ActOnDocumentableDecl(NewTemplate);
return NewTemplate;
}
if (IsMemberSpecialization && !NewVD->isInvalidDecl())
CompleteMemberSpecialization(NewVD, Previous);
return NewVD;
}
/// Enum describing the %select options in diag::warn_decl_shadow.
enum ShadowedDeclKind {
SDK_Local,
SDK_Global,
SDK_StaticMember,
SDK_Field,
SDK_Typedef,
SDK_Using
};
/// Determine what kind of declaration we're shadowing.
static ShadowedDeclKind computeShadowedDeclKind(const NamedDecl *ShadowedDecl,
const DeclContext *OldDC) {
if (isa<TypeAliasDecl>(ShadowedDecl))
return SDK_Using;
else if (isa<TypedefDecl>(ShadowedDecl))
return SDK_Typedef;
else if (isa<RecordDecl>(OldDC))
return isa<FieldDecl>(ShadowedDecl) ? SDK_Field : SDK_StaticMember;
return OldDC->isFileContext() ? SDK_Global : SDK_Local;
}
/// Return the location of the capture if the given lambda captures the given
/// variable \p VD, or an invalid source location otherwise.
static SourceLocation getCaptureLocation(const LambdaScopeInfo *LSI,
const VarDecl *VD) {
for (const Capture &Capture : LSI->Captures) {
if (Capture.isVariableCapture() && Capture.getVariable() == VD)
return Capture.getLocation();
}
return SourceLocation();
}
static bool shouldWarnIfShadowedDecl(const DiagnosticsEngine &Diags,
const LookupResult &R) {
// Only diagnose if we're shadowing an unambiguous field or variable.
if (R.getResultKind() != LookupResult::Found)
return false;
// Return false if warning is ignored.
return !Diags.isIgnored(diag::warn_decl_shadow, R.getNameLoc());
}
/// Return the declaration shadowed by the given variable \p D, or null
/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
const LookupResult &R) {
if (!shouldWarnIfShadowedDecl(Diags, R))
return nullptr;
// Don't diagnose declarations at file scope.
if (D->hasGlobalStorage())
return nullptr;
NamedDecl *ShadowedDecl = R.getFoundDecl();
return isa<VarDecl>(ShadowedDecl) || isa<FieldDecl>(ShadowedDecl)
? ShadowedDecl
: nullptr;
}
/// Return the declaration shadowed by the given typedef \p D, or null
/// if it doesn't shadow any declaration or shadowing warnings are disabled.
NamedDecl *Sema::getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R) {
// Don't warn if typedef declaration is part of a class
if (D->getDeclContext()->isRecord())
return nullptr;
if (!shouldWarnIfShadowedDecl(Diags, R))
return nullptr;
NamedDecl *ShadowedDecl = R.getFoundDecl();
return isa<TypedefNameDecl>(ShadowedDecl) ? ShadowedDecl : nullptr;
}
/// Diagnose variable or built-in function shadowing. Implements
/// -Wshadow.
///
/// This method is called whenever a VarDecl is added to a "useful"
/// scope.
///
/// \param ShadowedDecl the declaration that is shadowed by the given variable
/// \param R the lookup of the name
///
void Sema::CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R) {
DeclContext *NewDC = D->getDeclContext();
if (FieldDecl *FD = dyn_cast<FieldDecl>(ShadowedDecl)) {
// Fields are not shadowed by variables in C++ static methods.
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDC))
if (MD->isStatic())
return;
// Fields shadowed by constructor parameters are a special case. Usually
// the constructor initializes the field with the parameter.
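// For example (sketch):
//   struct S {
//     int val;
//     S(int val) : val(val) {}  // the parameter shadows the field, but the
//   };                          // field is initialized from it as intended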
if (isa<CXXConstructorDecl>(NewDC))
if (const auto PVD = dyn_cast<ParmVarDecl>(D)) {
// Remember that this was shadowed so we can either warn about its
// modification or its existence depending on warning settings.
ShadowingDecls.insert({PVD->getCanonicalDecl(), FD});
return;
}
}
if (VarDecl *shadowedVar = dyn_cast<VarDecl>(ShadowedDecl))
if (shadowedVar->isExternC()) {
// For shadowing external vars, make sure that we point to the global
// declaration, not a locally scoped extern declaration.
for (auto I : shadowedVar->redecls())
if (I->isFileVarDecl()) {
ShadowedDecl = I;
break;
}
}
DeclContext *OldDC = ShadowedDecl->getDeclContext()->getRedeclContext();
unsigned WarningDiag = diag::warn_decl_shadow;
SourceLocation CaptureLoc;
if (isa<VarDecl>(D) && isa<VarDecl>(ShadowedDecl) && NewDC &&
isa<CXXMethodDecl>(NewDC)) {
if (const auto *RD = dyn_cast<CXXRecordDecl>(NewDC->getParent())) {
if (RD->isLambda() && OldDC->Encloses(NewDC->getLexicalParent())) {
if (RD->getLambdaCaptureDefault() == LCD_None) {
// Try to avoid warnings for lambdas with an explicit capture list.
const auto *LSI = cast<LambdaScopeInfo>(getCurFunction());
// Warn only when the lambda captures the shadowed decl explicitly.
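// For example (sketch):
//   int i = 0;
//   auto l = [i] { int i = 1; };  // warns: the shadowed 'i' is captured
//   auto m = []  { int i = 1; };  // only the separate "uncaptured local"
//                                 // diagnostic applies here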
CaptureLoc = getCaptureLocation(LSI, cast<VarDecl>(ShadowedDecl));
if (CaptureLoc.isInvalid())
WarningDiag = diag::warn_decl_shadow_uncaptured_local;
} else {
// Remember that this was shadowed so we can avoid the warning if the
// shadowed decl isn't captured and the warning settings allow it.
cast<LambdaScopeInfo>(getCurFunction())
->ShadowingDecls.push_back(
{cast<VarDecl>(D), cast<VarDecl>(ShadowedDecl)});
return;
}
}
if (cast<VarDecl>(ShadowedDecl)->hasLocalStorage()) {
// A variable can't shadow a local variable in an enclosing scope if they
// are separated by a non-capturing declaration context.
for (DeclContext *ParentDC = NewDC;
ParentDC && !ParentDC->Equals(OldDC);
ParentDC = getLambdaAwareParentOfDeclContext(ParentDC)) {
// Only block literals, captured statements, and lambda expressions
// can capture; other scopes don't.
if (!isa<BlockDecl>(ParentDC) && !isa<CapturedDecl>(ParentDC) &&
!isLambdaCallOperator(ParentDC)) {
return;
}
}
}
}
}
// Only warn about certain kinds of shadowing for class members.
if (NewDC && NewDC->isRecord()) {
// In particular, don't warn about shadowing non-class members.
if (!OldDC->isRecord())
return;
// TODO: should we warn about static data members shadowing
// static data members from base classes?
// TODO: don't diagnose for inaccessible shadowed members.
// This is hard to do perfectly because we might friend the
// shadowing context, but that's just a false negative.
}
DeclarationName Name = R.getLookupName();
// Emit warning and note.
if (getSourceManager().isInSystemMacro(R.getNameLoc()))
return;
ShadowedDeclKind Kind = computeShadowedDeclKind(ShadowedDecl, OldDC);
Diag(R.getNameLoc(), WarningDiag) << Name << Kind << OldDC;
if (!CaptureLoc.isInvalid())
Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
<< Name << /*explicitly*/ 1;
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
}
/// Diagnose shadowing for variables shadowed in the lambda record described
/// by \p LSI when these variables are captured by the lambda.
void Sema::DiagnoseShadowingLambdaDecls(const LambdaScopeInfo *LSI) {
for (const auto &Shadow : LSI->ShadowingDecls) {
const VarDecl *ShadowedDecl = Shadow.ShadowedDecl;
// Try to avoid the warning when the shadowed decl isn't captured.
SourceLocation CaptureLoc = getCaptureLocation(LSI, ShadowedDecl);
const DeclContext *OldDC = ShadowedDecl->getDeclContext();
Diag(Shadow.VD->getLocation(), CaptureLoc.isInvalid()
? diag::warn_decl_shadow_uncaptured_local
: diag::warn_decl_shadow)
<< Shadow.VD->getDeclName()
<< computeShadowedDeclKind(ShadowedDecl, OldDC) << OldDC;
if (!CaptureLoc.isInvalid())
Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
<< Shadow.VD->getDeclName() << /*explicitly*/ 0;
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
}
}
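// Illustrative sketch (hypothetical user code, assuming -Wshadow): a lambda
// with a capture-default whose local variable shadows a captured outer one,
// which is the case recorded in LSI->ShadowingDecls and diagnosed above.
//
//   void f() {
//     int x = 0;
//     auto l = [=] {
//       int y = x;  // uses, and therefore captures, the outer 'x'
//       int x = 1;  // warn_decl_shadow: shadows the captured 'x'
//       (void)x; (void)y;
//     };
//   }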
/// Check -Wshadow without the advantage of a previous lookup.
void Sema::CheckShadow(Scope *S, VarDecl *D) {
if (Diags.isIgnored(diag::warn_decl_shadow, D->getLocation()))
return;
LookupResult R(*this, D->getDeclName(), D->getLocation(),
Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
LookupName(R, S);
if (NamedDecl *ShadowedDecl = getShadowedDeclaration(D, R))
CheckShadow(D, ShadowedDecl, R);
}
/// Check if 'E', which is an expression that is about to be modified, refers
/// to a constructor parameter that shadows a field.
void Sema::CheckShadowingDeclModification(Expr *E, SourceLocation Loc) {
// Quickly ignore expressions that can't be shadowing ctor parameters.
if (!getLangOpts().CPlusPlus || ShadowingDecls.empty())
return;
E = E->IgnoreParenImpCasts();
auto *DRE = dyn_cast<DeclRefExpr>(E);
if (!DRE)
return;
const NamedDecl *D = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
auto I = ShadowingDecls.find(D);
if (I == ShadowingDecls.end())
return;
const NamedDecl *ShadowedDecl = I->second;
const DeclContext *OldDC = ShadowedDecl->getDeclContext();
Diag(Loc, diag::warn_modifying_shadowing_decl) << D << OldDC;
Diag(D->getLocation(), diag::note_var_declared_here) << D;
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
// Avoid issuing multiple warnings about the same decl.
ShadowingDecls.erase(I);
}
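// Illustrative sketch (hypothetical user code): the constructor-parameter
// case recorded in ShadowingDecls by CheckShadow and diagnosed above when
// the parameter is modified instead of the field it shadows.
//
//   class A {
//     int val;
//   public:
//     A(int val) { val = 0; }  // warn_modifying_shadowing_decl: assigns to
//                              // the parameter 'val', not the field
//   };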
/// Check for conflict between this global or extern "C" declaration and
/// previous global or extern "C" declarations. This is only used in C++.
template<typename T>
static bool checkGlobalOrExternCConflict(
Sema &S, const T *ND, bool IsGlobal, LookupResult &Previous) {
assert(S.getLangOpts().CPlusPlus && "only C++ has extern \"C\"");
NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName());
if (!Prev && IsGlobal && !isIncompleteDeclExternC(S, ND)) {
// The common case: this global doesn't conflict with any extern "C"
// declaration.
return false;
}
if (Prev) {
if (!IsGlobal || isIncompleteDeclExternC(S, ND)) {
// Both the old and new declarations have C language linkage. This is a
// redeclaration.
Previous.clear();
Previous.addDecl(Prev);
return true;
}
// This is a global, non-extern "C" declaration, and there is a previous
// non-global extern "C" declaration. Diagnose if this is a variable
// declaration.
if (!isa<VarDecl>(ND))
return false;
} else {
// The declaration is extern "C". Check for any declaration in the
// translation unit which might conflict.
if (IsGlobal) {
// We have already performed the lookup into the translation unit.
IsGlobal = false;
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I) {
if (isa<VarDecl>(*I)) {
Prev = *I;
break;
}
}
} else {
DeclContext::lookup_result R =
S.Context.getTranslationUnitDecl()->lookup(ND->getDeclName());
for (DeclContext::lookup_result::iterator I = R.begin(), E = R.end();
I != E; ++I) {
if (isa<VarDecl>(*I)) {
Prev = *I;
break;
}
// FIXME: If we have any other entity with this name in global scope,
// the declaration is ill-formed, but that is a defect: it breaks the
// 'stat' hack, for instance. Only variables can have mangled name
// clashes with extern "C" declarations, so only they deserve a
// diagnostic.
}
}
if (!Prev)
return false;
}
// Use the first declaration's location to ensure we point at something which
// is lexically inside an extern "C" linkage-spec.
assert(Prev && "should have found a previous declaration to diagnose");
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Prev))
Prev = FD->getFirstDecl();
else
Prev = cast<VarDecl>(Prev)->getFirstDecl();
S.Diag(ND->getLocation(), diag::err_extern_c_global_conflict)
<< IsGlobal << ND;
S.Diag(Prev->getLocation(), diag::note_extern_c_global_conflict)
<< IsGlobal;
return false;
}
/// Apply special rules for handling extern "C" declarations. Returns \c true
/// if we have found that this is a redeclaration of some prior entity.
///
/// Per C++ [dcl.link]p6:
/// Two declarations [for a function or variable] with C language linkage
/// with the same name that appear in different scopes refer to the same
/// [entity]. An entity with C language linkage shall not be declared with
/// the same name as an entity in global scope.
template<typename T>
static bool checkForConflictWithNonVisibleExternC(Sema &S, const T *ND,
LookupResult &Previous) {
if (!S.getLangOpts().CPlusPlus) {
// In C, when declaring a global variable, look for a corresponding 'extern'
// variable declared in function scope. We don't need this in C++, because
// we find local extern decls in the surrounding file-scope DeclContext.
if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (NamedDecl *Prev = S.findLocallyScopedExternCDecl(ND->getDeclName())) {
Previous.clear();
Previous.addDecl(Prev);
return true;
}
}
return false;
}
// A declaration in the translation unit can conflict with an extern "C"
// declaration.
if (ND->getDeclContext()->getRedeclContext()->isTranslationUnit())
return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/true, Previous);
// An extern "C" declaration can conflict with a declaration in the
// translation unit or can be a redeclaration of an extern "C" declaration
// in another scope.
if (isIncompleteDeclExternC(S,ND))
return checkGlobalOrExternCConflict(S, ND, /*IsGlobal*/false, Previous);
// Neither global nor extern "C": nothing to do.
return false;
}
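// Illustrative sketch (hypothetical user code) of the [dcl.link]p6 conflict
// diagnosed through the helpers above:
//
//   extern "C" void f() { extern int n; }  // block-scope 'n' has C linkage
//   float n;  // err_extern_c_global_conflict: a global 'n' conflicts with
//             // the (non-visible) extern "C" 'n' declared inside f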
void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// If the decl is already known invalid, don't check it.
if (NewVD->isInvalidDecl())
return;
QualType T = NewVD->getType();
// Defer checking an 'auto' type until its initializer is attached.
if (T->isUndeducedType())
return;
if (NewVD->hasAttrs())
CheckAlignasUnderalignment(NewVD);
if (T->isObjCObjectType()) {
Diag(NewVD->getLocation(), diag::err_statically_allocated_object)
<< FixItHint::CreateInsertion(NewVD->getLocation(), "*");
T = Context.getObjCObjectPointerType(T);
NewVD->setType(T);
}
// Emit an error if an address space was applied to a decl with local storage.
// This includes arrays of objects with address space qualifiers, but not
// automatic variables that point to other address spaces.
// ISO/IEC TR 18037 S5.1.2
if (!getLangOpts().OpenCL && NewVD->hasLocalStorage() &&
T.getAddressSpace() != LangAS::Default) {
Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl) << 0;
NewVD->setInvalidDecl();
return;
}
// OpenCL v1.2 s6.8 - The static qualifier is valid only in program
// scope.
if (getLangOpts().OpenCLVersion == 120 &&
!getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers") &&
NewVD->isStaticLocal()) {
Diag(NewVD->getLocation(), diag::err_static_function_scope);
NewVD->setInvalidDecl();
return;
}
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.12.5 - The __block storage type is not supported.
if (NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_opencl_block_storage_type);
return;
}
if (T->isBlockPointerType()) {
// OpenCL v2.0 s6.12.5 - Any block declaration must be const qualified and
// can't use 'extern' storage class.
if (!T.isConstQualified()) {
Diag(NewVD->getLocation(), diag::err_opencl_invalid_block_declaration)
<< 0 /*const*/;
NewVD->setInvalidDecl();
return;
}
if (NewVD->hasExternalStorage()) {
Diag(NewVD->getLocation(), diag::err_opencl_extern_block_declaration);
NewVD->setInvalidDecl();
return;
}
}
// OpenCL v1.2 s6.5 - All program scope variables must be declared in the
// __constant address space.
// OpenCL v2.0 s6.5.1 - Variables defined at program scope and static
// variables inside a function can also be declared in the global
// address space.
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
if (!T->isSamplerT() &&
!(T.getAddressSpace() == LangAS::opencl_constant ||
(T.getAddressSpace() == LangAS::opencl_global &&
getLangOpts().OpenCLVersion == 200))) {
int Scope = NewVD->isStaticLocal() | NewVD->hasExternalStorage() << 1;
if (getLangOpts().OpenCLVersion == 200)
Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
<< Scope << "global or constant";
else
Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
<< Scope << "constant";
NewVD->setInvalidDecl();
return;
}
} else {
if (T.getAddressSpace() == LangAS::opencl_global) {
Diag(NewVD->getLocation(), diag::err_opencl_function_variable)
<< 1 /*is any function*/ << "global";
NewVD->setInvalidDecl();
return;
}
if (T.getAddressSpace() == LangAS::opencl_constant ||
T.getAddressSpace() == LangAS::opencl_local) {
FunctionDecl *FD = getCurFunctionDecl();
// OpenCL v1.1 s6.5.2 and s6.5.3: no local or constant variables
// in functions.
if (FD && !FD->hasAttr<OpenCLKernelAttr>()) {
if (T.getAddressSpace() == LangAS::opencl_constant)
Diag(NewVD->getLocation(), diag::err_opencl_function_variable)
<< 0 /*non-kernel only*/ << "constant";
else
Diag(NewVD->getLocation(), diag::err_opencl_function_variable)
<< 0 /*non-kernel only*/ << "local";
NewVD->setInvalidDecl();
return;
}
// OpenCL v2.0 s6.5.2 and s6.5.3: local and constant variables must be
// in the outermost scope of a kernel function.
if (FD && FD->hasAttr<OpenCLKernelAttr>()) {
if (!getCurScope()->isFunctionScope()) {
if (T.getAddressSpace() == LangAS::opencl_constant)
Diag(NewVD->getLocation(), diag::err_opencl_addrspace_scope)
<< "constant";
else
Diag(NewVD->getLocation(), diag::err_opencl_addrspace_scope)
<< "local";
NewVD->setInvalidDecl();
return;
}
}
} else if (T.getAddressSpace() != LangAS::opencl_private) {
// Do not allow other address spaces on automatic variable.
Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl) << 1;
NewVD->setInvalidDecl();
return;
}
}
}
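// Illustrative sketch (hypothetical OpenCL user code) for the address-space
// checks above:
//
//   int g;                 // OpenCL 1.2: error, program-scope variables must
//                          // be declared in the __constant address space
//   __constant int c = 1;  // OK
//   kernel void k() {
//     __global int v;      // err_opencl_function_variable: function-scope
//                          // variables cannot be __global
//   }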
if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
&& !NewVD->hasAttr<BlocksAttr>()) {
if (getLangOpts().getGC() != LangOptions::NonGC)
Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local);
else {
assert(!getLangOpts().ObjCAutoRefCount);
Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
}
}
bool isVM = T->isVariablyModifiedType();
if (isVM || NewVD->hasAttr<CleanupAttr>() ||
NewVD->hasAttr<BlocksAttr>())
setFunctionHasBranchProtectedScope();
if ((isVM && NewVD->hasLinkage()) ||
(T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
bool SizeIsNegative;
llvm::APSInt Oversized;
TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(
NewVD->getTypeSourceInfo(), Context, SizeIsNegative, Oversized);
QualType FixedT;
if (FixedTInfo && T == NewVD->getTypeSourceInfo()->getType())
FixedT = FixedTInfo->getType();
else if (FixedTInfo) {
// Type and type-as-written are canonically different. We need to fix up
// both types separately.
FixedT = TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative,
Oversized);
}
if ((!FixedTInfo || FixedT.isNull()) && T->isVariableArrayType()) {
const VariableArrayType *VAT = Context.getAsVariableArrayType(T);
// FIXME: This won't give the correct result for
// int a[10][n];
SourceRange SizeRange = VAT->getSizeExpr()->getSourceRange();
if (NewVD->isFileVarDecl())
Diag(NewVD->getLocation(), diag::err_vla_decl_in_file_scope)
<< SizeRange;
else if (NewVD->isStaticLocal())
Diag(NewVD->getLocation(), diag::err_vla_decl_has_static_storage)
<< SizeRange;
else
Diag(NewVD->getLocation(), diag::err_vla_decl_has_extern_linkage)
<< SizeRange;
NewVD->setInvalidDecl();
return;
}
if (!FixedTInfo) {
if (NewVD->isFileVarDecl())
Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope);
else
Diag(NewVD->getLocation(), diag::err_vm_decl_has_extern_linkage);
NewVD->setInvalidDecl();
return;
}
Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
NewVD->setType(FixedT);
NewVD->setTypeSourceInfo(FixedTInfo);
}
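// Illustrative sketch (hypothetical C user code) for the variably-modified
// checks above:
//
//   const int n = 8;
//   int a[n];  // not an ICE in C, but foldable: the type is "fixed up" to a
//              // constant array and warn_illegal_constant_array_size fires
//   int m;
//   int b[m];  // not foldable: err_vla_decl_in_file_scope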
if (T->isVoidType()) {
// C++98 [dcl.stc]p5: The extern specifier can be applied only to the names
// of objects and functions.
if (NewVD->isThisDeclarationADefinition() || getLangOpts().CPlusPlus) {
Diag(NewVD->getLocation(), diag::err_typecheck_decl_incomplete_type)
<< T;
NewVD->setInvalidDecl();
return;
}
}
if (!NewVD->hasLocalStorage() && NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_block_on_nonlocal);
NewVD->setInvalidDecl();
return;
}
if (isVM && NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_block_on_vm);
NewVD->setInvalidDecl();
return;
}
if (NewVD->isConstexpr() && !T->isDependentType() &&
RequireLiteralType(NewVD->getLocation(), T,
diag::err_constexpr_var_non_literal)) {
NewVD->setInvalidDecl();
return;
}
}
/// Perform semantic checking on a newly-created variable
/// declaration.
///
/// This routine performs all of the type-checking required for a
/// variable declaration once it has been built. It is used both to
/// check variables after they have been parsed and their declarators
/// have been translated into a declaration, and to check variables
/// that have been instantiated from a template.
///
/// Sets NewVD->isInvalidDecl() if an error was encountered.
///
/// Returns true if the variable declaration is a redeclaration.
bool Sema::CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous) {
CheckVariableDeclarationType(NewVD);
// If the decl is already known invalid, don't check it.
if (NewVD->isInvalidDecl())
return false;
// If we did not find anything by this name, look for a non-visible
// extern "C" declaration with the same name.
if (Previous.empty() &&
checkForConflictWithNonVisibleExternC(*this, NewVD, Previous))
Previous.setShadowed();
if (!Previous.empty()) {
MergeVarDecl(NewVD, Previous);
return true;
}
return false;
}
namespace {
struct FindOverriddenMethod {
Sema *S;
CXXMethodDecl *Method;
/// Member lookup function that determines whether a given C++
/// method overrides a method in a base class, to be used with
/// CXXRecordDecl::lookupInBases().
bool operator()(const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
RecordDecl *BaseRecord =
Specifier->getType()->getAs<RecordType>()->getDecl();
DeclarationName Name = Method->getDeclName();
// FIXME: Do we care about other names here too?
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// We really want to find the base class destructor here.
QualType T = S->Context.getTypeDeclType(BaseRecord);
CanQualType CT = S->Context.getCanonicalType(T);
Name = S->Context.DeclarationNames.getCXXDestructorName(CT);
}
for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
Path.Decls = Path.Decls.slice(1)) {
NamedDecl *D = Path.Decls.front();
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->isVirtual() && !S->IsOverload(Method, MD, false))
return true;
}
}
return false;
}
};
enum OverrideErrorKind { OEK_All, OEK_NonDeleted, OEK_Deleted };
} // end anonymous namespace
/// Report an error regarding overriding, along with any relevant
/// overridden methods.
///
/// \param DiagID the primary error to report.
/// \param MD the overriding method.
/// \param OEK which overrides to include as notes.
static void ReportOverrides(Sema& S, unsigned DiagID, const CXXMethodDecl *MD,
OverrideErrorKind OEK = OEK_All) {
S.Diag(MD->getLocation(), DiagID) << MD->getDeclName();
for (const CXXMethodDecl *O : MD->overridden_methods()) {
// This check (& the OEK parameter) could be replaced by a predicate, but
// without lambdas that would be overkill. This is still nicer than writing
// out the diag loop 3 times.
if ((OEK == OEK_All) ||
(OEK == OEK_NonDeleted && !O->isDeleted()) ||
(OEK == OEK_Deleted && O->isDeleted()))
S.Diag(O->getLocation(), diag::note_overridden_virtual_function);
}
}
/// AddOverriddenMethods - See if a method overrides any in the base classes,
/// and if so, check that it's a valid override and remember it.
bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
// Look for methods in base classes that this method might override.
CXXBasePaths Paths;
FindOverriddenMethod FOM;
FOM.Method = MD;
FOM.S = this;
bool hasDeletedOverridenMethods = false;
bool hasNonDeletedOverridenMethods = false;
bool AddedAny = false;
if (DC->lookupInBases(FOM, Paths)) {
for (auto *I : Paths.found_decls()) {
if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(I)) {
MD->addOverriddenMethod(OldMD->getCanonicalDecl());
if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
!CheckOverridingFunctionAttributes(MD, OldMD) &&
!CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
!CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
hasDeletedOverridenMethods |= OldMD->isDeleted();
hasNonDeletedOverridenMethods |= !OldMD->isDeleted();
AddedAny = true;
}
}
}
}
if (hasDeletedOverridenMethods && !MD->isDeleted()) {
ReportOverrides(*this, diag::err_non_deleted_override, MD, OEK_Deleted);
}
if (hasNonDeletedOverridenMethods && MD->isDeleted()) {
ReportOverrides(*this, diag::err_deleted_override, MD, OEK_NonDeleted);
}
return AddedAny;
}
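// Illustrative sketch (hypothetical user code) for the deleted-override
// checks above:
//
//   struct B1 { virtual void f() = delete; };
//   struct D1 : B1 { void f() override; };           // err_non_deleted_override
//
//   struct B2 { virtual void g(); };
//   struct D2 : B2 { void g() override = delete; };  // err_deleted_override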
namespace {
// Struct for holding all of the extra arguments needed by
// DiagnoseInvalidRedeclaration to call Sema::ActOnFunctionDeclarator.
struct ActOnFDArgs {
Scope *S;
Declarator &D;
MultiTemplateParamsArg TemplateParamLists;
bool AddToScope;
};
} // end anonymous namespace
namespace {
// Callback to only accept typo corrections that have a non-zero edit distance.
// Also only accept corrections that have the same parent decl.
class DifferentNameValidatorCCC : public CorrectionCandidateCallback {
public:
DifferentNameValidatorCCC(ASTContext &Context, FunctionDecl *TypoFD,
CXXRecordDecl *Parent)
: Context(Context), OriginalFD(TypoFD),
ExpectedParent(Parent ? Parent->getCanonicalDecl() : nullptr) {}
bool ValidateCandidate(const TypoCorrection &candidate) override {
if (candidate.getEditDistance() == 0)
return false;
SmallVector<unsigned, 1> MismatchedParams;
for (TypoCorrection::const_decl_iterator CDecl = candidate.begin(),
CDeclEnd = candidate.end();
CDecl != CDeclEnd; ++CDecl) {
FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
if (FD && !FD->hasBody() &&
hasSimilarParameters(Context, FD, OriginalFD, MismatchedParams)) {
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
CXXRecordDecl *Parent = MD->getParent();
if (Parent && Parent->getCanonicalDecl() == ExpectedParent)
return true;
} else if (!ExpectedParent) {
return true;
}
}
}
return false;
}
private:
ASTContext &Context;
FunctionDecl *OriginalFD;
CXXRecordDecl *ExpectedParent;
};
} // end anonymous namespace
void Sema::MarkTypoCorrectedFunctionDefinition(const NamedDecl *F) {
TypoCorrectedFunctionDefinitions.insert(F);
}
/// Generate diagnostics for an invalid function redeclaration.
///
/// This routine handles generating the diagnostic messages for an invalid
/// function redeclaration, including finding possible similar declarations
/// or performing typo correction if there are no previous declarations with
/// the same name.
///
/// Returns a NamedDecl iff typo correction was performed and substituting in
/// the new declaration name does not cause new errors.
static NamedDecl *DiagnoseInvalidRedeclaration(
Sema &SemaRef, LookupResult &Previous, FunctionDecl *NewFD,
ActOnFDArgs &ExtraArgs, bool IsLocalFriend, Scope *S) {
DeclarationName Name = NewFD->getDeclName();
DeclContext *NewDC = NewFD->getDeclContext();
SmallVector<unsigned, 1> MismatchedParams;
SmallVector<std::pair<FunctionDecl *, unsigned>, 1> NearMatches;
TypoCorrection Correction;
bool IsDefinition = ExtraArgs.D.isFunctionDefinition();
unsigned DiagMsg = IsLocalFriend ? diag::err_no_matching_local_friend
: diag::err_member_decl_does_not_match;
LookupResult Prev(SemaRef, Name, NewFD->getLocation(),
IsLocalFriend ? Sema::LookupLocalFriendName
: Sema::LookupOrdinaryName,
Sema::ForVisibleRedeclaration);
NewFD->setInvalidDecl();
if (IsLocalFriend)
SemaRef.LookupName(Prev, S);
else
SemaRef.LookupQualifiedName(Prev, NewDC);
assert(!Prev.isAmbiguous() &&
"Cannot have an ambiguity in previous-declaration lookup");
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
if (!Prev.empty()) {
for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
Func != FuncEnd; ++Func) {
FunctionDecl *FD = dyn_cast<FunctionDecl>(*Func);
if (FD &&
hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
// Add 1 to the index so that 0 can mean the mismatch didn't
// involve a parameter
unsigned ParamNum =
MismatchedParams.empty() ? 0 : MismatchedParams.front() + 1;
NearMatches.push_back(std::make_pair(FD, ParamNum));
}
}
// If the qualified name lookup yielded nothing, try typo correction
} else if ((Correction = SemaRef.CorrectTypo(
Prev.getLookupNameInfo(), Prev.getLookupKind(), S,
&ExtraArgs.D.getCXXScopeSpec(),
llvm::make_unique<DifferentNameValidatorCCC>(
SemaRef.Context, NewFD, MD ? MD->getParent() : nullptr),
Sema::CTK_ErrorRecovery, IsLocalFriend ? nullptr : NewDC))) {
// Set up everything for the call to ActOnFunctionDeclarator
ExtraArgs.D.SetIdentifier(Correction.getCorrectionAsIdentifierInfo(),
ExtraArgs.D.getIdentifierLoc());
Previous.clear();
Previous.setLookupName(Correction.getCorrection());
for (TypoCorrection::decl_iterator CDecl = Correction.begin(),
CDeclEnd = Correction.end();
CDecl != CDeclEnd; ++CDecl) {
FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
if (FD && !FD->hasBody() &&
hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
Previous.addDecl(FD);
}
}
bool wasRedeclaration = ExtraArgs.D.isRedeclaration();
NamedDecl *Result;
// Retry building the function declaration with the new previous
// declarations, and with errors suppressed.
{
// Trap errors.
Sema::SFINAETrap Trap(SemaRef);
// TODO: Refactor ActOnFunctionDeclarator so that we can call only the
// pieces needed to verify the typo-corrected C++ declaration and hopefully
// eliminate the need for the parameter pack ExtraArgs.
Result = SemaRef.ActOnFunctionDeclarator(
ExtraArgs.S, ExtraArgs.D,
Correction.getCorrectionDecl()->getDeclContext(),
NewFD->getTypeSourceInfo(), Previous, ExtraArgs.TemplateParamLists,
ExtraArgs.AddToScope);
if (Trap.hasErrorOccurred())
Result = nullptr;
}
if (Result) {
// Determine which correction we picked.
Decl *Canonical = Result->getCanonicalDecl();
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I)
if ((*I)->getCanonicalDecl() == Canonical)
Correction.setCorrectionDecl(*I);
// Let Sema know about the correction.
SemaRef.MarkTypoCorrectedFunctionDefinition(Result);
SemaRef.diagnoseTypo(
Correction,
SemaRef.PDiag(IsLocalFriend
? diag::err_no_matching_local_friend_suggest
: diag::err_member_decl_does_not_match_suggest)
<< Name << NewDC << IsDefinition);
return Result;
}
// Pretend the typo correction never occurred
ExtraArgs.D.SetIdentifier(Name.getAsIdentifierInfo(),
ExtraArgs.D.getIdentifierLoc());
ExtraArgs.D.setRedeclaration(wasRedeclaration);
Previous.clear();
Previous.setLookupName(Name);
}
SemaRef.Diag(NewFD->getLocation(), DiagMsg)
<< Name << NewDC << IsDefinition << NewFD->getLocation();
bool NewFDisConst = false;
if (CXXMethodDecl *NewMD = dyn_cast<CXXMethodDecl>(NewFD))
NewFDisConst = NewMD->isConst();
for (SmallVectorImpl<std::pair<FunctionDecl *, unsigned> >::iterator
NearMatch = NearMatches.begin(), NearMatchEnd = NearMatches.end();
NearMatch != NearMatchEnd; ++NearMatch) {
FunctionDecl *FD = NearMatch->first;
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
bool FDisConst = MD && MD->isConst();
bool IsMember = MD || !IsLocalFriend;
// FIXME: These notes are poorly worded for the local friend case.
if (unsigned Idx = NearMatch->second) {
ParmVarDecl *FDParam = FD->getParamDecl(Idx-1);
SourceLocation Loc = FDParam->getTypeSpecStartLoc();
if (Loc.isInvalid()) Loc = FD->getLocation();
SemaRef.Diag(Loc, IsMember ? diag::note_member_def_close_param_match
: diag::note_local_decl_close_param_match)
<< Idx << FDParam->getType()
<< NewFD->getParamDecl(Idx - 1)->getType();
} else if (FDisConst != NewFDisConst) {
SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
<< NewFDisConst << FD->getSourceRange().getEnd();
} else
SemaRef.Diag(FD->getLocation(),
IsMember ? diag::note_member_def_close_match
: diag::note_local_decl_close_match);
}
return nullptr;
}
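// Illustrative sketch (hypothetical user code) of the typo-correction path
// above:
//
//   struct S { void func(int); };
//   void S::fucn(int) {}  // out-of-line definition matches no member; typo
//                         // correction suggests 'func' and the declaration
//                         // is retried against it via ActOnFunctionDeclarator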
static StorageClass getFunctionStorageClass(Sema &SemaRef, Declarator &D) {
switch (D.getDeclSpec().getStorageClassSpec()) {
default: llvm_unreachable("Unknown storage class!");
case DeclSpec::SCS_auto:
case DeclSpec::SCS_register:
case DeclSpec::SCS_mutable:
SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_typecheck_sclass_func);
D.getMutableDeclSpec().ClearStorageClassSpecs();
D.setInvalidType();
break;
case DeclSpec::SCS_unspecified: break;
case DeclSpec::SCS_extern:
if (D.getDeclSpec().isExternInLinkageSpec())
return SC_None;
return SC_Extern;
case DeclSpec::SCS_static: {
if (SemaRef.CurContext->getRedeclContext()->isFunctionOrMethod()) {
// C99 6.7.1p5:
// The declaration of an identifier for a function that has
// block scope shall have no explicit storage-class specifier
// other than extern
// See also (C++ [dcl.stc]p4).
SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_static_block_func);
break;
} else
return SC_Static;
}
case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
}
// No explicit storage class was returned above, so there is none.
return SC_None;
}
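// Illustrative sketch (hypothetical user code) for the storage-class checks
// above:
//
//   void f() {
//     static void g();  // err_static_block_func (C99 6.7.1p5)
//     extern void h();  // OK: 'extern' is the only explicit storage class
//                       // allowed on a block-scope function declaration
//   }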
static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
DeclContext *DC, QualType &R,
TypeSourceInfo *TInfo,
StorageClass SC,
bool &IsVirtualOkay) {
DeclarationNameInfo NameInfo = SemaRef.GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
FunctionDecl *NewFD = nullptr;
bool isInline = D.getDeclSpec().isInlineSpecified();
if (!SemaRef.getLangOpts().CPlusPlus) {
// Determine whether the function was written with a
// prototype. This is true when:
// - there is a prototype in the declarator, or
// - the type R of the function is some kind of typedef or other non-
// attributed reference to a type name (which eventually refers to a
// function type).
bool HasPrototype =
(D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
(!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
NewFD = FunctionDecl::Create(SemaRef.Context, DC,
D.getLocStart(), NameInfo, R,
TInfo, SC, isInline,
HasPrototype, false);
if (D.isInvalidType())
NewFD->setInvalidDecl();
return NewFD;
}
bool isExplicit = D.getDeclSpec().isExplicitSpecified();
bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
// Check that the return type is not an abstract class type.
// For record types, this is done by the AbstractClassUsageDiagnoser once
// the class has been completely parsed.
if (!DC->isRecord() &&
SemaRef.RequireNonAbstractType(
D.getIdentifierLoc(), R->getAs<FunctionType>()->getReturnType(),
diag::err_abstract_type_in_decl, SemaRef.AbstractReturnType))
D.setInvalidType();
if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
// This is a C++ constructor declaration.
assert(DC->isRecord() &&
"Constructors can only be declared in a member context");
R = SemaRef.CheckConstructorDeclarator(D, R, SC);
return CXXConstructorDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
D.getLocStart(), NameInfo,
R, TInfo, isExplicit, isInline,
/*isImplicitlyDeclared=*/false,
isConstexpr);
} else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
// This is a C++ destructor declaration.
if (DC->isRecord()) {
R = SemaRef.CheckDestructorDeclarator(D, R, SC);
CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
CXXDestructorDecl *NewDD = CXXDestructorDecl::Create(
SemaRef.Context, Record,
D.getLocStart(),
NameInfo, R, TInfo, isInline,
/*isImplicitlyDeclared=*/false);
// If the class is complete, then we now create the implicit exception
// specification. If the class is incomplete or dependent, we can't do
// it yet.
if (SemaRef.getLangOpts().CPlusPlus11 && !Record->isDependentType() &&
Record->getDefinition() && !Record->isBeingDefined() &&
R->getAs<FunctionProtoType>()->getExceptionSpecType() == EST_None) {
SemaRef.AdjustDestructorExceptionSpec(Record, NewDD);
}
IsVirtualOkay = true;
return NewDD;
} else {
SemaRef.Diag(D.getIdentifierLoc(), diag::err_destructor_not_member);
D.setInvalidType();
// Create a FunctionDecl to satisfy the function definition parsing
// code path.
return FunctionDecl::Create(SemaRef.Context, DC,
D.getLocStart(),
D.getIdentifierLoc(), Name, R, TInfo,
SC, isInline,
/*hasPrototype=*/true, isConstexpr);
}
} else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
if (!DC->isRecord()) {
SemaRef.Diag(D.getIdentifierLoc(),
diag::err_conv_function_not_member);
return nullptr;
}
SemaRef.CheckConversionDeclarator(D, R, SC);
IsVirtualOkay = true;
return CXXConversionDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
D.getLocStart(), NameInfo,
R, TInfo, isInline, isExplicit,
isConstexpr, SourceLocation());
} else if (Name.getNameKind() == DeclarationName::CXXDeductionGuideName) {
SemaRef.CheckDeductionGuideDeclarator(D, R, SC);
return CXXDeductionGuideDecl::Create(SemaRef.Context, DC, D.getLocStart(),
isExplicit, NameInfo, R, TInfo,
D.getLocEnd());
} else if (DC->isRecord()) {
// If the name of the function is the same as the name of the record,
// then this must be an invalid constructor that has a return type.
// (The parser checks for a return type and makes the declarator a
// constructor if it has no return type).
if (Name.getAsIdentifierInfo() &&
Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){
SemaRef.Diag(D.getIdentifierLoc(), diag::err_constructor_return_type)
<< SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
<< SourceRange(D.getIdentifierLoc());
return nullptr;
}
// This is a C++ method declaration.
CXXMethodDecl *Ret = CXXMethodDecl::Create(SemaRef.Context,
cast<CXXRecordDecl>(DC),
D.getLocStart(), NameInfo, R,
TInfo, SC, isInline,
isConstexpr, SourceLocation());
IsVirtualOkay = !Ret->isStatic();
return Ret;
} else {
bool isFriend =
SemaRef.getLangOpts().CPlusPlus && D.getDeclSpec().isFriendSpecified();
if (!isFriend && SemaRef.CurContext->isRecord())
return nullptr;
// Determine whether the function was written with a
// prototype. This is true when:
// - we're in C++ (where every function has a prototype).
return FunctionDecl::Create(SemaRef.Context, DC,
D.getLocStart(),
NameInfo, R, TInfo, SC, isInline,
true/*HasPrototype*/, isConstexpr);
}
}
enum OpenCLParamType {
ValidKernelParam,
PtrPtrKernelParam,
PtrKernelParam,
InvalidAddrSpacePtrKernelParam,
InvalidKernelParam,
RecordKernelParam
};
static bool isOpenCLSizeDependentType(ASTContext &C, QualType Ty) {
// Size-dependent types are just typedefs for normal integer types
// (e.g. unsigned long), so the only way to distinguish them from other
// integer typedefs is by their names.
StringRef SizeTypeNames[] = {"size_t", "intptr_t", "uintptr_t", "ptrdiff_t"};
// Desugar the type one typedef at a time until we either reach a typedef
// for a size-dependent type or run out of sugar.
QualType DesugaredTy = Ty;
do {
ArrayRef<StringRef> Names(SizeTypeNames);
auto Match =
std::find(Names.begin(), Names.end(), DesugaredTy.getAsString());
if (Names.end() != Match)
return true;
Ty = DesugaredTy;
DesugaredTy = Ty.getSingleStepDesugaredType(C);
} while (DesugaredTy != Ty);
return false;
}
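// Illustrative sketch (hypothetical user code): the loop above catches a
// size-dependent type hidden behind several layers of sugar, e.g.
//
//   typedef size_t my_size_t;  // my_size_t -> size_t -> unsigned long
//   typedef my_size_t arg_t;   // arg_t -> my_size_t -> size_t -> ...
//
// desugaring 'arg_t' one step at a time eventually yields a type spelled
// "size_t", which matches SizeTypeNames.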
static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
if (PT->isPointerType()) {
QualType PointeeType = PT->getPointeeType();
if (PointeeType->isPointerType())
return PtrPtrKernelParam;
if (PointeeType.getAddressSpace() == LangAS::opencl_generic ||
PointeeType.getAddressSpace() == LangAS::opencl_private ||
PointeeType.getAddressSpace() == LangAS::Default)
return InvalidAddrSpacePtrKernelParam;
return PtrKernelParam;
}
// OpenCL v1.2 s6.9.k:
// Arguments to kernel functions in a program cannot be declared with the
// built-in scalar types bool, half, size_t, ptrdiff_t, intptr_t, and
// uintptr_t or a struct and/or union that contain fields declared to be one
// of these built-in scalar types.
if (isOpenCLSizeDependentType(S.getASTContext(), PT))
return InvalidKernelParam;
if (PT->isImageType())
return PtrKernelParam;
if (PT->isBooleanType() || PT->isEventT() || PT->isReserveIDT())
return InvalidKernelParam;
// OpenCL extension spec v1.2 s9.5:
// This extension adds support for half scalar and vector types as built-in
// types that can be used for arithmetic operations, conversions etc.
if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16") && PT->isHalfType())
return InvalidKernelParam;
if (PT->isRecordType())
return RecordKernelParam;
// Look into an array argument to check if it has a forbidden type.
if (PT->isArrayType()) {
const Type *UnderlyingTy = PT->getPointeeOrArrayElementType();
// Call ourselves to check the underlying type of the array. Since
// getPointeeOrArrayElementType returns the innermost type, which is not an
// array, this recursive call happens at most once.
return getOpenCLKernelParameterType(S, QualType(UnderlyingTy, 0));
}
return ValidKernelParam;
}
static void checkIsValidOpenCLKernelParameter(
Sema &S,
Declarator &D,
ParmVarDecl *Param,
llvm::SmallPtrSetImpl<const Type *> &ValidTypes) {
QualType PT = Param->getType();
// Cache the valid types we encounter to avoid rechecking structs that are
// used again
if (ValidTypes.count(PT.getTypePtr()))
return;
switch (getOpenCLKernelParameterType(S, PT)) {
case PtrPtrKernelParam:
// OpenCL v1.2 s6.9.a:
// A kernel function argument cannot be declared as a
// pointer to a pointer type.
S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
D.setInvalidType();
return;
case InvalidAddrSpacePtrKernelParam:
// OpenCL v1.0 s6.5:
// __kernel function arguments declared to be a pointer of a type can point
// to one of the following address spaces only : __global, __local or
// __constant.
S.Diag(Param->getLocation(), diag::err_kernel_arg_address_space);
D.setInvalidType();
return;
// OpenCL v1.2 s6.9.k:
// Arguments to kernel functions in a program cannot be declared with the
// built-in scalar types bool, half, size_t, ptrdiff_t, intptr_t, and
// uintptr_t or a struct and/or union that contain fields declared to be
// one of these built-in scalar types.
case InvalidKernelParam:
// OpenCL v1.2 s6.8 n:
// A kernel function argument cannot be declared
// of event_t type.
// Do not diagnose half type since it is diagnosed as invalid argument
// type for any function elsewhere.
if (!PT->isHalfType()) {
S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
// Explain what typedefs are involved.
const TypedefType *Typedef = nullptr;
while ((Typedef = PT->getAs<TypedefType>())) {
SourceLocation Loc = Typedef->getDecl()->getLocation();
// SourceLocation may be invalid for a built-in type.
if (Loc.isValid())
S.Diag(Loc, diag::note_entity_declared_at) << PT;
PT = Typedef->desugar();
}
}
D.setInvalidType();
return;
case PtrKernelParam:
case ValidKernelParam:
ValidTypes.insert(PT.getTypePtr());
return;
case RecordKernelParam:
break;
}
// Track nested structs we will inspect
SmallVector<const Decl *, 4> VisitStack;
// Track where we are in the nested structs. Items will migrate from
// VisitStack to HistoryStack as we do the DFS looking for a bad field.
SmallVector<const FieldDecl *, 4> HistoryStack;
HistoryStack.push_back(nullptr);
// At this point we have already handled everything except a RecordType or
// an ArrayType of a RecordType.
assert((PT->isArrayType() || PT->isRecordType()) && "Unexpected type.");
const RecordType *RecTy =
PT->getPointeeOrArrayElementType()->getAs<RecordType>();
const RecordDecl *OrigRecDecl = RecTy->getDecl();
VisitStack.push_back(RecTy->getDecl());
assert(VisitStack.back() && "First decl null?");
do {
const Decl *Next = VisitStack.pop_back_val();
if (!Next) {
assert(!HistoryStack.empty());
// Found a marker, we have gone up a level
if (const FieldDecl *Hist = HistoryStack.pop_back_val())
ValidTypes.insert(Hist->getType().getTypePtr());
continue;
}
// Adds everything except the original parameter declaration (which is not a
// field itself) to the history stack.
const RecordDecl *RD;
if (const FieldDecl *Field = dyn_cast<FieldDecl>(Next)) {
HistoryStack.push_back(Field);
QualType FieldTy = Field->getType();
// Other field types (known to be valid or invalid) are handled while we
// walk around RecordDecl::fields().
assert((FieldTy->isArrayType() || FieldTy->isRecordType()) &&
"Unexpected type.");
const Type *FieldRecTy = FieldTy->getPointeeOrArrayElementType();
RD = FieldRecTy->castAs<RecordType>()->getDecl();
} else {
RD = cast<RecordDecl>(Next);
}
// Add a null marker so we know when we've gone back up a level
VisitStack.push_back(nullptr);
for (const auto *FD : RD->fields()) {
QualType QT = FD->getType();
if (ValidTypes.count(QT.getTypePtr()))
continue;
OpenCLParamType ParamType = getOpenCLKernelParameterType(S, QT);
if (ParamType == ValidKernelParam)
continue;
if (ParamType == RecordKernelParam) {
VisitStack.push_back(FD);
continue;
}
// OpenCL v1.2 s6.9.p:
// Arguments to kernel functions that are declared to be a struct or union
// do not allow OpenCL objects to be passed as elements of the struct or
// union.
if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam ||
ParamType == InvalidAddrSpacePtrKernelParam) {
S.Diag(Param->getLocation(),
diag::err_record_with_pointers_kernel_param)
<< PT->isUnionType()
<< PT;
} else {
S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
}
S.Diag(OrigRecDecl->getLocation(), diag::note_within_field_of_type)
<< OrigRecDecl->getDeclName();
// We have an error; now walk back up through the history and show where
// the offending field came from.
for (ArrayRef<const FieldDecl *>::const_iterator
I = HistoryStack.begin() + 1,
E = HistoryStack.end();
I != E; ++I) {
const FieldDecl *OuterField = *I;
S.Diag(OuterField->getLocation(), diag::note_within_field_of_type)
<< OuterField->getType();
}
S.Diag(FD->getLocation(), diag::note_illegal_field_declared_here)
<< QT->isPointerType()
<< QT;
D.setInvalidType();
return;
}
} while (!VisitStack.empty());
}
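// Illustrative sketch (hypothetical OpenCL user code) for the kernel
// parameter checks above:
//
//   kernel void k1(global int **p);  // err_opencl_ptrptr_kernel_param
//   kernel void k2(bool b);          // err_bad_kernel_param_type
//   struct S { global int *p; };
//   kernel void k3(struct S s);      // err_record_with_pointers_kernel_param,
//                                    // with notes tracing the offending field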
/// Find the DeclContext in which a tag is implicitly declared if we see an
/// elaborated type specifier in the specified context, and lookup finds
/// nothing.
static DeclContext *getTagInjectionContext(DeclContext *DC) {
while (!DC->isFileContext() && !DC->isFunctionOrMethod())
DC = DC->getParent();
return DC;
}
/// Find the Scope in which a tag is implicitly declared if we see an
/// elaborated type specifier in the specified context, and lookup finds
/// nothing.
static Scope *getTagInjectionScope(Scope *S, const LangOptions &LangOpts) {
while (S->isClassScope() ||
(LangOpts.CPlusPlus &&
S->isFunctionPrototypeScope()) ||
((S->getFlags() & Scope::DeclScope) == 0) ||
(S->getEntity() && S->getEntity()->isTransparentContext()))
S = S->getParent();
return S;
}
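// Illustrative sketch (hypothetical C++ user code): for a tag first seen in
// a parameter list, such as
//
//   void f(struct S *p);  // at namespace scope
//
// 'S' cannot live in the function prototype scope, so the helpers above walk
// outward (skipping class, prototype, non-declaration, and transparent
// scopes) and 'struct S' is injected into the enclosing namespace scope.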
NamedDecl*
Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo, LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope) {
QualType R = TInfo->getType();
assert(R->isFunctionType());
// TODO: consider using NameInfo for diagnostic.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
StorageClass SC = getFunctionStorageClass(*this, D);
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_invalid_thread)
<< DeclSpec::getSpecifierName(TSCS);
if (D.isFirstDeclarationOfMember())
adjustMemberFunctionCC(R, D.isStaticMember(), D.isCtorOrDtor(),
D.getIdentifierLoc());
bool isFriend = false;
FunctionTemplateDecl *FunctionTemplate = nullptr;
bool isMemberSpecialization = false;
bool isFunctionTemplateSpecialization = false;
bool isDependentClassScopeExplicitSpecialization = false;
bool HasExplicitTemplateArgs = false;
TemplateArgumentListInfo TemplateArgs;
bool isVirtualOkay = false;
DeclContext *OriginalDC = DC;
bool IsLocalExternDecl = adjustContextForLocalExternDecl(DC);
FunctionDecl *NewFD = CreateNewFunctionDecl(*this, D, DC, R, TInfo, SC,
isVirtualOkay);
if (!NewFD) return nullptr;
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer())
NewFD->setTopLevelDeclInObjCContainer();
// Set the lexical context. If this is a function-scope declaration, or has a
// C++ scope specifier, or is the object of a friend declaration, the lexical
// context will be different from the semantic context.
NewFD->setLexicalDeclContext(CurContext);
if (IsLocalExternDecl)
NewFD->setLocalExternDecl();
if (getLangOpts().CPlusPlus) {
bool isInline = D.getDeclSpec().isInlineSpecified();
bool isVirtual = D.getDeclSpec().isVirtualSpecified();
bool isExplicit = D.getDeclSpec().isExplicitSpecified();
bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
isFriend = D.getDeclSpec().isFriendSpecified();
if (isFriend && !isInline && D.isFunctionDefinition()) {
// C++ [class.friend]p5
// A function can be defined in a friend declaration of a
// class . . . . Such a function is implicitly inline.
NewFD->setImplicitlyInline();
}
// If this is a method defined in an __interface, and is not a constructor
// or an overloaded operator, then set the pure flag (isVirtual will already
// return true).
if (const CXXRecordDecl *Parent =
dyn_cast<CXXRecordDecl>(NewFD->getDeclContext())) {
if (Parent->isInterface() && cast<CXXMethodDecl>(NewFD)->isUserProvided())
NewFD->setPure(true);
// C++ [class.union]p2
// A union can have member functions, but not virtual functions.
if (isVirtual && Parent->isUnion())
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_virtual_in_union);
}
SetNestedNameSpecifier(NewFD, D);
isMemberSpecialization = false;
isFunctionTemplateSpecialization = false;
if (D.isInvalidType())
NewFD->setInvalidDecl();
// Match up the template parameter lists with the scope specifier, then
// determine whether we have a template or a template specialization.
bool Invalid = false;
if (TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
D.getDeclSpec().getLocStart(), D.getIdentifierLoc(),
D.getCXXScopeSpec(),
D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId
? D.getName().TemplateId
: nullptr,
TemplateParamLists, isFriend, isMemberSpecialization,
Invalid)) {
if (TemplateParams->size() > 0) {
// This is a function template
// Check that we can declare a template here.
if (CheckTemplateDeclScope(S, TemplateParams))
NewFD->setInvalidDecl();
// A destructor cannot be a template.
if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
Diag(NewFD->getLocation(), diag::err_destructor_template);
NewFD->setInvalidDecl();
}
// If we're adding a template to a dependent context, we may need to
// rebuild some of the types used within the template parameter list,
// now that we know what the current instantiation is.
if (DC->isDependentContext()) {
ContextRAII SavedContext(*this, DC);
if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
Invalid = true;
}
FunctionTemplate = FunctionTemplateDecl::Create(Context, DC,
NewFD->getLocation(),
Name, TemplateParams,
NewFD);
FunctionTemplate->setLexicalDeclContext(CurContext);
NewFD->setDescribedFunctionTemplate(FunctionTemplate);
// For source fidelity, store the other template param lists.
if (TemplateParamLists.size() > 1) {
NewFD->setTemplateParameterListsInfo(Context,
TemplateParamLists.drop_back(1));
}
} else {
// This is a function template specialization.
isFunctionTemplateSpecialization = true;
// For source fidelity, store all the template param lists.
if (TemplateParamLists.size() > 0)
NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
// C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
if (isFriend) {
// We want to remove the "template<>", found here.
SourceRange RemoveRange = TemplateParams->getSourceRange();
// If we remove the template<> and the name is not a
// template-id, we're actually silently creating a problem:
// the friend declaration will refer to an untemplated decl,
// and clearly the user wants a template specialization. So
// we need to insert '<>' after the name.
SourceLocation InsertLoc;
if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
InsertLoc = D.getName().getSourceRange().getEnd();
InsertLoc = getLocForEndOfToken(InsertLoc);
}
Diag(D.getIdentifierLoc(), diag::err_template_spec_decl_friend)
<< Name << RemoveRange
<< FixItHint::CreateRemoval(RemoveRange)
<< FixItHint::CreateInsertion(InsertLoc, "<>");
}
}
}
else {
// All template param lists were matched against the scope specifier:
// this is NOT (an explicit specialization of) a template.
if (TemplateParamLists.size() > 0)
// For source fidelity, store all the template param lists.
NewFD->setTemplateParameterListsInfo(Context, TemplateParamLists);
}
if (Invalid) {
NewFD->setInvalidDecl();
if (FunctionTemplate)
FunctionTemplate->setInvalidDecl();
}
// C++ [dcl.fct.spec]p5:
// The virtual specifier shall only be used in declarations of
// nonstatic class member functions that appear within a
// member-specification of a class declaration; see 10.3.
//
if (isVirtual && !NewFD->isInvalidDecl()) {
if (!isVirtualOkay) {
Diag(D.getDeclSpec().getVirtualSpecLoc(),
diag::err_virtual_non_function);
} else if (!CurContext->isRecord()) {
// 'virtual' was specified outside of the class.
Diag(D.getDeclSpec().getVirtualSpecLoc(),
diag::err_virtual_out_of_class)
<< FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
} else if (NewFD->getDescribedFunctionTemplate()) {
// C++ [temp.mem]p3:
// A member function template shall not be virtual.
Diag(D.getDeclSpec().getVirtualSpecLoc(),
diag::err_virtual_member_function_template)
<< FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
} else {
// Okay: Add virtual to the method.
NewFD->setVirtualAsWritten(true);
}
if (getLangOpts().CPlusPlus14 &&
NewFD->getReturnType()->isUndeducedType())
Diag(D.getDeclSpec().getVirtualSpecLoc(), diag::err_auto_fn_virtual);
}
if (getLangOpts().CPlusPlus14 &&
(NewFD->isDependentContext() ||
(isFriend && CurContext->isDependentContext())) &&
NewFD->getReturnType()->isUndeducedType()) {
// If the function template is referenced directly (for instance, as a
// member of the current instantiation), pretend it has a dependent type.
// This is not really justified by the standard, but is the only sane
// thing to do.
// FIXME: For a friend function, we have not marked the function as being
// a friend yet, so 'isDependentContext' on the FD doesn't work.
const FunctionProtoType *FPT =
NewFD->getType()->castAs<FunctionProtoType>();
QualType Result =
SubstAutoType(FPT->getReturnType(), Context.DependentTy);
NewFD->setType(Context.getFunctionType(Result, FPT->getParamTypes(),
FPT->getExtProtoInfo()));
}
// C++ [dcl.fct.spec]p3:
// The inline specifier shall not appear on a block scope function
// declaration.
if (isInline && !NewFD->isInvalidDecl()) {
if (CurContext->isFunctionOrMethod()) {
// 'inline' is not allowed on block scope function declaration.
Diag(D.getDeclSpec().getInlineSpecLoc(),
diag::err_inline_declaration_block_scope) << Name
<< FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
}
}
// C++ [dcl.fct.spec]p6:
// The explicit specifier shall be used only in the declaration of a
// constructor or conversion function within its class definition;
// see 12.3.1 and 12.3.2.
if (isExplicit && !NewFD->isInvalidDecl() &&
!isa<CXXDeductionGuideDecl>(NewFD)) {
if (!CurContext->isRecord()) {
// 'explicit' was specified outside of the class.
Diag(D.getDeclSpec().getExplicitSpecLoc(),
diag::err_explicit_out_of_class)
<< FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
} else if (!isa<CXXConstructorDecl>(NewFD) &&
!isa<CXXConversionDecl>(NewFD)) {
// 'explicit' was specified on a function that wasn't a constructor
// or conversion function.
Diag(D.getDeclSpec().getExplicitSpecLoc(),
diag::err_explicit_non_ctor_or_conv_function)
<< FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
}
}
if (isConstexpr) {
// C++11 [dcl.constexpr]p2: constexpr functions and constexpr constructors
// are implicitly inline.
NewFD->setImplicitlyInline();
// C++11 [dcl.constexpr]p3: functions declared constexpr are required to
// be either constructors or to return a literal type. Therefore,
// destructors cannot be declared constexpr.
if (isa<CXXDestructorDecl>(NewFD))
Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor);
}
// If __module_private__ was specified, mark the function accordingly.
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (isFunctionTemplateSpecialization) {
SourceLocation ModulePrivateLoc
= D.getDeclSpec().getModulePrivateSpecLoc();
Diag(ModulePrivateLoc, diag::err_module_private_specialization)
<< 0
<< FixItHint::CreateRemoval(ModulePrivateLoc);
} else {
NewFD->setModulePrivate();
if (FunctionTemplate)
FunctionTemplate->setModulePrivate();
}
}
if (isFriend) {
if (FunctionTemplate) {
FunctionTemplate->setObjectOfFriendDecl();
FunctionTemplate->setAccess(AS_public);
}
NewFD->setObjectOfFriendDecl();
NewFD->setAccess(AS_public);
}
// If a function is defined as defaulted or deleted, mark it as such now.
// FIXME: Does this ever happen? ActOnStartOfFunctionDef forces the function
// definition kind to FDK_Definition.
switch (D.getFunctionDefinitionKind()) {
case FDK_Declaration:
case FDK_Definition:
break;
case FDK_Defaulted:
NewFD->setDefaulted();
break;
case FDK_Deleted:
NewFD->setDeletedAsWritten();
break;
}
if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
D.isFunctionDefinition()) {
// C++ [class.mfct]p2:
// A member function may be defined (8.4) in its class definition, in
// which case it is an inline member function (7.1.2)
NewFD->setImplicitlyInline();
}
if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
!CurContext->isRecord()) {
// C++ [class.static]p1:
// A data or function member of a class may be declared static
// in a class definition, in which case it is a static member of
// the class.
// Complain about the 'static' specifier if it's on an out-of-line
// member function definition.
Diag(D.getDeclSpec().getStorageClassSpecLoc(),
diag::err_static_out_of_line)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
// C++11 [except.spec]p15:
// A deallocation function with no exception-specification is treated
// as if it were specified with noexcept(true).
const FunctionProtoType *FPT = R->getAs<FunctionProtoType>();
if ((Name.getCXXOverloadedOperator() == OO_Delete ||
Name.getCXXOverloadedOperator() == OO_Array_Delete) &&
getLangOpts().CPlusPlus11 && FPT && !FPT->hasExceptionSpec())
NewFD->setType(Context.getFunctionType(
FPT->getReturnType(), FPT->getParamTypes(),
FPT->getExtProtoInfo().withExceptionSpec(EST_BasicNoexcept)));
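// For instance (hypothetical user code), 'void operator delete(void *);'
// declared without an exception-specification is given the type
// 'void (void *) noexcept' here under C++11.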
}
// Filter out previous declarations that don't match the scope.
FilterLookupForScope(Previous, OriginalDC, S, shouldConsiderLinkage(NewFD),
D.getCXXScopeSpec().isNotEmpty() ||
isMemberSpecialization ||
isFunctionTemplateSpecialization);
// Handle GNU asm-label extension (encoded as an attribute).
if (Expr *E = (Expr*) D.getAsmLabel()) {
// The parser guarantees this is a string.
StringLiteral *SE = cast<StringLiteral>(E);
NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context,
SE->getString(), 0));
} else if (!ExtnameUndeclaredIdentifiers.empty()) {
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier());
if (I != ExtnameUndeclaredIdentifiers.end()) {
if (isDeclExternC(NewFD)) {
NewFD->addAttr(I->second);
ExtnameUndeclaredIdentifiers.erase(I);
} else
Diag(NewFD->getLocation(), diag::warn_redefine_extname_not_applied)
<< /*Variable*/0 << NewFD;
}
}
// Copy the parameter declarations from the declarator D to the function
// declaration NewFD, if they are available. First scavenge them into Params.
SmallVector<ParmVarDecl*, 16> Params;
unsigned FTIIdx;
if (D.isFunctionDeclarator(FTIIdx)) {
DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(FTIIdx).Fun;
// Check for C99 6.7.5.3p10 - foo(void) is a non-varargs
// function that takes no arguments, not a function that takes a
// single void argument.
// We let through "const void" here because Sema::GetTypeForDeclarator
// already checks for that case.
if (FTIHasNonVoidParameters(FTI) && FTI.Params[0].Param) {
for (unsigned i = 0, e = FTI.NumParams; i != e; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
assert(Param->getDeclContext() != NewFD && "Was set before ?");
Param->setDeclContext(NewFD);
Params.push_back(Param);
if (Param->isInvalidDecl())
NewFD->setInvalidDecl();
}
}
if (!getLangOpts().CPlusPlus) {
// In C, find all the tag declarations from the prototype and move them
// into the function DeclContext. Remove them from the surrounding tag
// injection context of the function, which is typically but not always
// the TU.
DeclContext *PrototypeTagContext =
getTagInjectionContext(NewFD->getLexicalDeclContext());
for (NamedDecl *NonParmDecl : FTI.getDeclsInPrototype()) {
auto *TD = dyn_cast<TagDecl>(NonParmDecl);
// We don't want to reparent enumerators. Look at their parent enum
// instead.
if (!TD) {
if (auto *ECD = dyn_cast<EnumConstantDecl>(NonParmDecl))
TD = cast<EnumDecl>(ECD->getDeclContext());
}
if (!TD)
continue;
DeclContext *TagDC = TD->getLexicalDeclContext();
if (!TagDC->containsDecl(TD))
continue;
TagDC->removeDecl(TD);
TD->setDeclContext(NewFD);
NewFD->addDecl(TD);
// Preserve the lexical DeclContext if it is not the surrounding tag
// injection context of the FD. In this example, the semantic context of
// E will be f and the lexical context will be S, while both the
// semantic and lexical contexts of S will be f:
// void f(struct S { enum E { a } f; } s);
if (TagDC != PrototypeTagContext)
TD->setLexicalDeclContext(TagDC);
}
}
} else if (const FunctionProtoType *FT = R->getAs<FunctionProtoType>()) {
// When we're declaring a function with a typedef, typeof, etc as in the
// following example, we'll need to synthesize (unnamed)
// parameters for use in the declaration.
//
// @code
// typedef void fn(int);
// fn f;
// @endcode
// Synthesize a parameter for each argument type.
for (const auto &AI : FT->param_types()) {
ParmVarDecl *Param =
BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), AI);
Param->setScopeInfo(0, Params.size());
Params.push_back(Param);
}
} else {
assert(R->isFunctionNoProtoType() && NewFD->getNumParams() == 0 &&
"Should not need args for typedef of non-prototype fn");
}
// Finally, we know we have the right number of parameters, install them.
NewFD->setParams(Params);
if (D.getDeclSpec().isNoreturnSpecified())
NewFD->addAttr(
::new(Context) C11NoReturnAttr(D.getDeclSpec().getNoreturnSpecLoc(),
Context, 0));
// Functions returning a variably modified type violate C99 6.7.5.2p2
// because all functions have linkage.
if (!NewFD->isInvalidDecl() &&
NewFD->getReturnType()->isVariablyModifiedType()) {
Diag(NewFD->getLocation(), diag::err_vm_func_decl);
NewFD->setInvalidDecl();
}
// Apply an implicit SectionAttr if '#pragma clang section text' is active
if (PragmaClangTextSection.Valid && D.isFunctionDefinition() &&
!NewFD->hasAttr<SectionAttr>()) {
NewFD->addAttr(PragmaClangTextSectionAttr::CreateImplicit(Context,
PragmaClangTextSection.SectionName,
PragmaClangTextSection.PragmaLocation));
}
// Apply an implicit SectionAttr if #pragma code_seg is active.
if (CodeSegStack.CurrentValue && D.isFunctionDefinition() &&
!NewFD->hasAttr<SectionAttr>()) {
NewFD->addAttr(
SectionAttr::CreateImplicit(Context, SectionAttr::Declspec_allocate,
CodeSegStack.CurrentValue->getString(),
CodeSegStack.CurrentPragmaLocation));
if (UnifySection(CodeSegStack.CurrentValue->getString(),
ASTContext::PSF_Implicit | ASTContext::PSF_Execute |
ASTContext::PSF_Read,
NewFD))
NewFD->dropAttr<SectionAttr>();
}
// Apply an implicit CodeSegAttr from class declspec or
// apply an implicit SectionAttr from #pragma code_seg if active.
if (!NewFD->hasAttr<CodeSegAttr>()) {
if (Attr *SAttr = getImplicitCodeSegOrSectionAttrForFunction(NewFD,
D.isFunctionDefinition())) {
NewFD->addAttr(SAttr);
}
}
// Handle attributes.
ProcessDeclAttributes(S, NewFD, D);
if (getLangOpts().OpenCL) {
// OpenCL v1.1 s6.5: Using an address space qualifier in a function return
// type declaration will generate a compilation error.
LangAS AddressSpace = NewFD->getReturnType().getAddressSpace();
if (AddressSpace != LangAS::Default) {
Diag(NewFD->getLocation(),
diag::err_opencl_return_value_with_address_space);
NewFD->setInvalidDecl();
}
}
if (!getLangOpts().CPlusPlus) {
// Perform semantic checking on the function declaration.
if (!NewFD->isInvalidDecl() && NewFD->isMain())
CheckMain(NewFD, D.getDeclSpec());
if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
CheckMSVCRTEntryPoint(NewFD);
if (!NewFD->isInvalidDecl())
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
isMemberSpecialization));
else if (!Previous.empty())
// Recover gracefully from an invalid redeclaration.
D.setRedeclaration(true);
assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
// Diagnose no-prototype function declarations with calling conventions that
// don't support variadic calls. Only do this in C and do it after merging
// possibly prototyped redeclarations.
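// For example (a sketch of input diagnosed here):
//   void __stdcall f();   // warned: stdcall cannot support variadic calls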
const FunctionType *FT = NewFD->getType()->castAs<FunctionType>();
if (isa<FunctionNoProtoType>(FT) && !D.isFunctionDefinition()) {
CallingConv CC = FT->getExtInfo().getCC();
if (!supportsVariadicCall(CC)) {
// Windows system headers sometimes accidentally use stdcall without
// (void) parameters, so we relax this to a warning.
int DiagID =
CC == CC_X86StdCall ? diag::warn_cconv_knr : diag::err_cconv_knr;
Diag(NewFD->getLocation(), DiagID)
<< FunctionType::getNameForCallConv(CC);
}
}
} else {
// C++11 [replacement.functions]p3:
// The program's definitions shall not be specified as inline.
//
// N.B. We diagnose declarations instead of definitions per LWG issue 2340.
//
// Suppress the diagnostic if the function is __attribute__((used)), since
// that forces an external definition to be emitted.
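// For example:
//   inline void *operator new(std::size_t);   // diagnosed here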
if (D.getDeclSpec().isInlineSpecified() &&
NewFD->isReplaceableGlobalAllocationFunction() &&
!NewFD->hasAttr<UsedAttr>())
Diag(D.getDeclSpec().getInlineSpecLoc(),
diag::ext_operator_new_delete_declared_inline)
<< NewFD->getDeclName();
// If the declarator is a template-id, translate the parser's template
// argument list into our AST format.
if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
translateTemplateArguments(TemplateArgsPtr,
TemplateArgs);
HasExplicitTemplateArgs = true;
if (NewFD->isInvalidDecl()) {
HasExplicitTemplateArgs = false;
} else if (FunctionTemplate) {
// Function template with explicit template arguments.
Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec)
<< SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc);
HasExplicitTemplateArgs = false;
} else {
assert((isFunctionTemplateSpecialization ||
D.getDeclSpec().isFriendSpecified()) &&
"should have a 'template<>' for this decl");
// "friend void foo<>(int);" is an implicit specialization decl.
isFunctionTemplateSpecialization = true;
}
} else if (isFriend && isFunctionTemplateSpecialization) {
// This combination is only possible in a recovery case; the user
// wrote something like:
// template <> friend void foo(int);
// which we're recovering from as if the user had written:
// friend void foo<>(int);
// Go ahead and fake up a template id.
HasExplicitTemplateArgs = true;
TemplateArgs.setLAngleLoc(D.getIdentifierLoc());
TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
}
// We do not add HD attributes to specializations here because
// they may have different constexpr-ness compared to their
// templates and, after maybeAddCUDAHostDeviceAttrs() is applied,
// may end up with different effective targets. Instead, a
// specialization inherits its target attributes from its template
// in the CheckFunctionTemplateSpecialization() call below.
if (getLangOpts().CUDA && !isFunctionTemplateSpecialization)
maybeAddCUDAHostDeviceAttrs(NewFD, Previous);
// If it's a friend (and only if it's a friend), it's possible
// that either the specialized function type or the specialized
// template is dependent, and therefore matching will fail. In
// this case, don't check the specialization yet.
bool InstantiationDependent = false;
if (isFunctionTemplateSpecialization && isFriend &&
(NewFD->getType()->isDependentType() || DC->isDependentContext() ||
TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs,
InstantiationDependent))) {
assert(HasExplicitTemplateArgs &&
"friend function specialization without template args");
if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
Previous))
NewFD->setInvalidDecl();
} else if (isFunctionTemplateSpecialization) {
if (CurContext->isDependentContext() && CurContext->isRecord()
&& !isFriend) {
isDependentClassScopeExplicitSpecialization = true;
} else if (!NewFD->isInvalidDecl() &&
CheckFunctionTemplateSpecialization(
NewFD, (HasExplicitTemplateArgs ? &TemplateArgs : nullptr),
Previous))
NewFD->setInvalidDecl();
// C++ [dcl.stc]p1:
// A storage-class-specifier shall not be specified in an explicit
// specialization (14.7.3)
FunctionTemplateSpecializationInfo *Info =
NewFD->getTemplateSpecializationInfo();
if (Info && SC != SC_None) {
if (SC != Info->getTemplate()->getTemplatedDecl()->getStorageClass())
Diag(NewFD->getLocation(),
diag::err_explicit_specialization_inconsistent_storage_class)
<< SC
<< FixItHint::CreateRemoval(
D.getDeclSpec().getStorageClassSpecLoc());
else
Diag(NewFD->getLocation(),
diag::ext_explicit_specialization_storage_class)
<< FixItHint::CreateRemoval(
D.getDeclSpec().getStorageClassSpecLoc());
}
} else if (isMemberSpecialization && isa<CXXMethodDecl>(NewFD)) {
if (CheckMemberSpecialization(NewFD, Previous))
NewFD->setInvalidDecl();
}
// Perform semantic checking on the function declaration.
if (!isDependentClassScopeExplicitSpecialization) {
if (!NewFD->isInvalidDecl() && NewFD->isMain())
CheckMain(NewFD, D.getDeclSpec());
if (!NewFD->isInvalidDecl() && NewFD->isMSVCRTEntryPoint())
CheckMSVCRTEntryPoint(NewFD);
if (!NewFD->isInvalidDecl())
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
isMemberSpecialization));
else if (!Previous.empty())
// Recover gracefully from an invalid redeclaration.
D.setRedeclaration(true);
}
assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
Previous.getResultKind() != LookupResult::FoundOverloaded) &&
"previous declaration set still overloaded");
NamedDecl *PrincipalDecl = (FunctionTemplate
? cast<NamedDecl>(FunctionTemplate)
: NewFD);
if (isFriend && NewFD->getPreviousDecl()) {
AccessSpecifier Access = AS_public;
if (!NewFD->isInvalidDecl())
Access = NewFD->getPreviousDecl()->getAccess();
NewFD->setAccess(Access);
if (FunctionTemplate) FunctionTemplate->setAccess(Access);
}
if (NewFD->isOverloadedOperator() && !DC->isRecord() &&
PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
PrincipalDecl->setNonMemberOperator();
// If we have a function template, check the template parameter
// list. This will check and merge default template arguments.
if (FunctionTemplate) {
FunctionTemplateDecl *PrevTemplate =
FunctionTemplate->getPreviousDecl();
CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(),
PrevTemplate ? PrevTemplate->getTemplateParameters()
: nullptr,
D.getDeclSpec().isFriendSpecified()
? (D.isFunctionDefinition()
? TPC_FriendFunctionTemplateDefinition
: TPC_FriendFunctionTemplate)
: (D.getCXXScopeSpec().isSet() &&
DC && DC->isRecord() &&
DC->isDependentContext())
? TPC_ClassTemplateMember
: TPC_FunctionTemplate);
}
if (NewFD->isInvalidDecl()) {
// Ignore all the rest of this.
} else if (!D.isRedeclaration()) {
struct ActOnFDArgs ExtraArgs = { S, D, TemplateParamLists,
AddToScope };
// Fake up an access specifier if it's supposed to be a class member.
if (isa<CXXRecordDecl>(NewFD->getDeclContext()))
NewFD->setAccess(AS_public);
// Qualified decls generally require a previous declaration.
if (D.getCXXScopeSpec().isSet()) {
// ...with the major exception of templated-scope or
// dependent-scope friend declarations.
// TODO: we currently also suppress this check in dependent
// contexts because (1) the parameter depth will be off when
// matching friend templates and (2) we might actually be
// selecting a friend based on a dependent factor. But there
// are situations where these conditions don't apply and we
// can actually do this check immediately.
if (isFriend &&
(TemplateParamLists.size() ||
D.getCXXScopeSpec().getScopeRep()->isDependent() ||
CurContext->isDependentContext())) {
// ignore these
} else {
// The user tried to provide an out-of-line definition for a
// function that is a member of a class or namespace, but there
// was no such member function declared (C++ [class.mfct]p2,
// C++ [namespace.memdef]p2). For example:
//
// class X {
// void f() const;
// };
//
// void X::f() { } // ill-formed
//
// Complain about this problem, and attempt to suggest close
// matches (e.g., those that differ only in cv-qualifiers and
// whether the parameter types are references).
if (NamedDecl *Result = DiagnoseInvalidRedeclaration(
*this, Previous, NewFD, ExtraArgs, false, nullptr)) {
AddToScope = ExtraArgs.AddToScope;
return Result;
}
}
// Unqualified local friend declarations are required to resolve
// to something.
} else if (isFriend && cast<CXXRecordDecl>(CurContext)->isLocalClass()) {
if (NamedDecl *Result = DiagnoseInvalidRedeclaration(
*this, Previous, NewFD, ExtraArgs, true, S)) {
AddToScope = ExtraArgs.AddToScope;
return Result;
}
}
} else if (!D.isFunctionDefinition() &&
isa<CXXMethodDecl>(NewFD) && NewFD->isOutOfLine() &&
!isFriend && !isFunctionTemplateSpecialization &&
!isMemberSpecialization) {
// An out-of-line member function declaration must also be a
// definition (C++ [class.mfct]p2).
// Note that this is not the case for explicit specializations of
// function templates or member functions of class templates, per
// C++ [temp.expl.spec]p2. We also allow these declarations as an
// extension for compatibility with old SWIG code which likes to
// generate them.
Diag(NewFD->getLocation(), diag::ext_out_of_line_declaration)
<< D.getCXXScopeSpec().getRange();
}
}
ProcessPragmaWeak(S, NewFD);
checkAttributesAfterMerging(*this, *NewFD);
AddKnownFunctionAttributes(NewFD);
if (NewFD->hasAttr<OverloadableAttr>() &&
!NewFD->getType()->getAs<FunctionProtoType>()) {
Diag(NewFD->getLocation(),
diag::err_attribute_overloadable_no_prototype)
<< NewFD;
// Turn this into a variadic function with no parameters.
const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
FunctionProtoType::ExtProtoInfo EPI(
Context.getDefaultCallingConvention(true, false));
EPI.Variadic = true;
EPI.ExtInfo = FT->getExtInfo();
QualType R = Context.getFunctionType(FT->getReturnType(), None, EPI);
NewFD->setType(R);
}
// If there's a #pragma GCC visibility in scope, and this isn't a class
// member, set the visibility of this function.
if (!DC->isRecord() && NewFD->isExternallyVisible())
AddPushedVisibilityAttribute(NewFD);
// If there's a #pragma clang arc_cf_code_audited in scope, consider
// marking the function.
AddCFAuditedAttribute(NewFD);
// If this is a function definition, check if we have to apply optnone due to
// a pragma.
if (D.isFunctionDefinition())
AddRangeBasedOptnone(NewFD);
// If this is the first declaration of an extern C variable, update
// the map of such variables.
if (NewFD->isFirstDecl() && !NewFD->isInvalidDecl() &&
isIncompleteDeclExternC(*this, NewFD))
RegisterLocallyScopedExternCDecl(NewFD, S);
// Set this FunctionDecl's range up to the right paren.
NewFD->setRangeEnd(D.getSourceRange().getEnd());
if (D.isRedeclaration() && !Previous.empty()) {
NamedDecl *Prev = Previous.getRepresentativeDecl();
checkDLLAttributeRedeclaration(*this, Prev, NewFD,
isMemberSpecialization ||
isFunctionTemplateSpecialization,
D.isFunctionDefinition());
}
if (getLangOpts().CUDA) {
IdentifierInfo *II = NewFD->getIdentifier();
if (II &&
II->isStr(getLangOpts().HIP ? "hipConfigureCall"
: "cudaConfigureCall") &&
!NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
Diag(NewFD->getLocation(), diag::err_config_scalar_return);
Context.setcudaConfigureCallDecl(NewFD);
}
// Variadic functions, other than a *declaration* of printf, are not allowed
// in device-side CUDA code, unless someone passed
// -fcuda-allow-variadic-functions.
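// For example (a sketch of input rejected here):
//   __device__ void dev_log(const char *fmt, ...);   // error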
if (!getLangOpts().CUDAAllowVariadicFunctions && NewFD->isVariadic() &&
(NewFD->hasAttr<CUDADeviceAttr>() ||
NewFD->hasAttr<CUDAGlobalAttr>()) &&
!(II && II->isStr("printf") && NewFD->isExternC() &&
!D.isFunctionDefinition())) {
Diag(NewFD->getLocation(), diag::err_variadic_device_fn);
}
}
MarkUnusedFileScopedDecl(NewFD);
if (getLangOpts().CPlusPlus) {
if (FunctionTemplate) {
if (NewFD->isInvalidDecl())
FunctionTemplate->setInvalidDecl();
return FunctionTemplate;
}
if (isMemberSpecialization && !NewFD->isInvalidDecl())
CompleteMemberSpecialization(NewFD, Previous);
}
if (NewFD->hasAttr<OpenCLKernelAttr>()) {
// OpenCL v1.2 s6.8 static is invalid for kernel functions.
if ((getLangOpts().OpenCLVersion >= 120)
&& (SC == SC_Static)) {
Diag(D.getIdentifierLoc(), diag::err_static_kernel);
D.setInvalidType();
}
// OpenCL v1.2, s6.9 -- Kernels can only have return type void.
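// For example:
//   kernel int bad() { return 0; }   // error: kernel must return void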
if (!NewFD->getReturnType()->isVoidType()) {
SourceRange RTRange = NewFD->getReturnTypeSourceRange();
Diag(D.getIdentifierLoc(), diag::err_expected_kernel_void_return_type)
<< (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "void")
: FixItHint());
D.setInvalidType();
}
llvm::SmallPtrSet<const Type *, 16> ValidTypes;
for (auto Param : NewFD->parameters())
checkIsValidOpenCLKernelParameter(*this, D, Param, ValidTypes);
}
for (const ParmVarDecl *Param : NewFD->parameters()) {
QualType PT = Param->getType();
// OpenCL 2.0 pipe restrictions forbid pipe packet types from being
// non-value types.
if (getLangOpts().OpenCLVersion >= 200) {
if (const PipeType *PipeTy = PT->getAs<PipeType>()) {
QualType ElemTy = PipeTy->getElementType();
if (ElemTy->isReferenceType() || ElemTy->isPointerType()) {
Diag(Param->getTypeSpecStartLoc(), diag::err_reference_pipe_type);
D.setInvalidType();
}
}
}
}
// Here we have a function template explicit specialization at class scope.
// The actual specialization will be postponed to template instantiation
// time via the ClassScopeFunctionSpecializationDecl node.
if (isDependentClassScopeExplicitSpecialization) {
ClassScopeFunctionSpecializationDecl *NewSpec =
ClassScopeFunctionSpecializationDecl::Create(
Context, CurContext, NewFD->getLocation(),
cast<CXXMethodDecl>(NewFD),
HasExplicitTemplateArgs, TemplateArgs);
CurContext->addDecl(NewSpec);
AddToScope = false;
}
// Diagnose availability attributes. Availability cannot be used on functions
// that are run during load/unload.
if (const auto *attr = NewFD->getAttr<AvailabilityAttr>()) {
if (NewFD->hasAttr<ConstructorAttr>()) {
Diag(attr->getLocation(), diag::warn_availability_on_static_initializer)
<< 1;
NewFD->dropAttr<AvailabilityAttr>();
}
if (NewFD->hasAttr<DestructorAttr>()) {
Diag(attr->getLocation(), diag::warn_availability_on_static_initializer)
<< 2;
NewFD->dropAttr<AvailabilityAttr>();
}
}
return NewFD;
}
/// Return a CodeSegAttr from a containing class. The Microsoft docs say
/// when __declspec(code_seg) "is applied to a class, all member functions of
/// the class and nested classes -- this includes compiler-generated special
/// member functions -- are put in the specified segment."
/// The actual behavior is a little more complicated. The Microsoft compiler
/// won't check outer classes if there is an active value from #pragma code_seg.
/// The CodeSeg is always applied from the direct parent but only from outer
/// classes when the #pragma code_seg stack is empty. See:
/// https://reviews.llvm.org/D22931; the Microsoft feedback page cited there
/// has since been removed.
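/// For example:
///   struct __declspec(code_seg("foo")) S { void f() {} };   // S::f -> "foo"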
static Attr *getImplicitCodeSegAttrFromClass(Sema &S, const FunctionDecl *FD) {
const auto *Method = dyn_cast<CXXMethodDecl>(FD);
if (!Method)
return nullptr;
const CXXRecordDecl *Parent = Method->getParent();
if (const auto *SAttr = Parent->getAttr<CodeSegAttr>()) {
Attr *NewAttr = SAttr->clone(S.getASTContext());
NewAttr->setImplicit(true);
return NewAttr;
}
// The Microsoft compiler won't check outer classes for the CodeSeg
// when the #pragma code_seg stack is active.
if (S.CodeSegStack.CurrentValue)
return nullptr;
while ((Parent = dyn_cast<CXXRecordDecl>(Parent->getParent()))) {
if (const auto *SAttr = Parent->getAttr<CodeSegAttr>()) {
Attr *NewAttr = SAttr->clone(S.getASTContext());
NewAttr->setImplicit(true);
return NewAttr;
}
}
return nullptr;
}
/// Returns an implicit CodeSegAttr if a __declspec(code_seg) is found on a
/// containing class. Otherwise it will return implicit SectionAttr if the
/// function is a definition and there is an active value on CodeSegStack
/// (from the current #pragma code-seg value).
///
/// \param FD Function being declared.
/// \param IsDefinition Whether it is a definition or just a declaration.
/// \returns A CodeSegAttr or SectionAttr to apply to the function or
/// nullptr if no attribute should be added.
Attr *Sema::getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition) {
if (Attr *A = getImplicitCodeSegAttrFromClass(*this, FD))
return A;
if (!FD->hasAttr<SectionAttr>() && IsDefinition &&
CodeSegStack.CurrentValue) {
return SectionAttr::CreateImplicit(getASTContext(),
SectionAttr::Declspec_allocate,
CodeSegStack.CurrentValue->getString(),
CodeSegStack.CurrentPragmaLocation);
}
return nullptr;
}
+
+/// Determines if we can perform a correct type check for \p NewD as a
+/// redeclaration of \p OldD. If not, we can generally still perform a
+/// best-effort check.
+///
+/// \param NewD The new declaration.
+/// \param OldD The old declaration.
+/// \param NewT The portion of the type of the new declaration to check.
+/// \param OldT The portion of the type of the old declaration to check.
+bool Sema::canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
+ QualType NewT, QualType OldT) {
+ if (!NewD->getLexicalDeclContext()->isDependentContext())
+ return true;
+
+ // For dependently-typed local extern declarations and friends, we can't
+ // perform a correct type check in general until instantiation:
+ //
+ // int f();
+ // template<typename T> void g() { T f(); }
+ //
+ // (valid if g() is only instantiated with T = int).
+ if (NewT->isDependentType() &&
+ (NewD->isLocalExternDecl() || NewD->getFriendObjectKind()))
+ return false;
+
+ // Similarly, if the previous declaration was a dependent local extern
+ // declaration, we don't really know its type yet.
+ if (OldT->isDependentType() && OldD->isLocalExternDecl())
+ return false;
+
+ return true;
+}
+
/// Checks if the new declaration declared in dependent context must be
/// put in the same redeclaration chain as the specified declaration.
///
/// \param D Declaration that is checked.
/// \param PrevDecl Previous declaration found with proper lookup method for the
/// same declaration name.
/// \returns True if D must be added to the redeclaration chain which PrevDecl
/// belongs to.
///
bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
- // Any declarations should be put into redeclaration chains except for
- // friend declaration in a dependent context that names a function in
- // namespace scope.
+ if (!D->getLexicalDeclContext()->isDependentContext())
+ return true;
+
+ // Don't chain dependent friend function definitions until instantiation, to
+ // permit cases like
//
- // This allows to compile code like:
+ // void func();
+ // template<typename T> class C1 { friend void func() {} };
+ // template<typename T> class C2 { friend void func() {} };
//
- // void func();
- // template<typename T> class C1 { friend void func() { } };
- // template<typename T> class C2 { friend void func() { } };
+ // ... which is valid if only one of C1 and C2 is ever instantiated.
//
- // This code snippet is a valid code unless both templates are instantiated.
- return !(D->getLexicalDeclContext()->isDependentContext() &&
- D->getDeclContext()->isFileContext() &&
- D->getFriendObjectKind() != Decl::FOK_None);
+ // FIXME: This need only apply to function definitions. For now, we proxy
+ // this by checking for a file-scope function. We do not want this to apply
+ // to friend declarations nominating member functions, because that gets in
+ // the way of access checks.
+ if (D->getFriendObjectKind() && D->getDeclContext()->isFileContext())
+ return false;
+
+ auto *VD = dyn_cast<ValueDecl>(D);
+ auto *PrevVD = dyn_cast<ValueDecl>(PrevDecl);
+ return !VD || !PrevVD ||
+ canFullyTypeCheckRedeclaration(VD, PrevVD, VD->getType(),
+ PrevVD->getType());
}
namespace MultiVersioning {
enum Type { None, Target, CPUSpecific, CPUDispatch };
} // namespace MultiVersioning
static MultiVersioning::Type
getMultiVersionType(const FunctionDecl *FD) {
if (FD->hasAttr<TargetAttr>())
return MultiVersioning::Target;
if (FD->hasAttr<CPUDispatchAttr>())
return MultiVersioning::CPUDispatch;
if (FD->hasAttr<CPUSpecificAttr>())
return MultiVersioning::CPUSpecific;
return MultiVersioning::None;
}
/// Check the target attribute of the function for MultiVersion
/// validity.
///
/// Returns true if there was an error, false otherwise.
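/// For example, target("arch=sandybridge") is validated against the known CPU
/// names, while negative features such as target("no-sse4.2") are rejected
/// for multiversioning.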
static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
const auto *TA = FD->getAttr<TargetAttr>();
assert(TA && "MultiVersion Candidate requires a target attribute");
TargetAttr::ParsedTargetAttr ParseInfo = TA->parse();
const TargetInfo &TargetInfo = S.Context.getTargetInfo();
enum ErrType { Feature = 0, Architecture = 1 };
if (!ParseInfo.Architecture.empty() &&
!TargetInfo.validateCpuIs(ParseInfo.Architecture)) {
S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
<< Architecture << ParseInfo.Architecture;
return true;
}
for (const auto &Feat : ParseInfo.Features) {
auto BareFeat = StringRef{Feat}.substr(1);
if (Feat[0] == '-') {
S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
<< Feature << ("no-" + BareFeat).str();
return true;
}
if (!TargetInfo.validateCpuSupports(BareFeat) ||
!TargetInfo.isValidFeatureName(BareFeat)) {
S.Diag(FD->getLocation(), diag::err_bad_multiversion_option)
<< Feature << BareFeat;
return true;
}
}
return false;
}
static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
const FunctionDecl *NewFD,
bool CausesMV,
MultiVersioning::Type MVType) {
enum DoesntSupport {
FuncTemplates = 0,
VirtFuncs = 1,
DeducedReturn = 2,
Constructors = 3,
Destructors = 4,
DeletedFuncs = 5,
DefaultedFuncs = 6,
ConstexprFuncs = 7,
};
enum Different {
CallingConv = 0,
ReturnType = 1,
ConstexprSpec = 2,
InlineSpec = 3,
StorageClass = 4,
Linkage = 5
};
bool IsCPUSpecificCPUDispatchMVType =
MVType == MultiVersioning::CPUDispatch ||
MVType == MultiVersioning::CPUSpecific;
if (OldFD && !OldFD->getType()->getAs<FunctionProtoType>()) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_noproto);
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
}
if (!NewFD->getType()->getAs<FunctionProtoType>())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_noproto);
if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
if (OldFD)
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
return true;
}
// For now, disallow all other attributes. These should be opt-in, but
// an analysis of all of them is a future FIXME.
if (CausesMV && OldFD &&
std::distance(OldFD->attr_begin(), OldFD->attr_end()) != 1) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
}
if (std::distance(NewFD->attr_begin(), NewFD->attr_end()) != 1)
return S.Diag(NewFD->getLocation(), diag::err_multiversion_no_other_attrs)
<< IsCPUSpecificCPUDispatchMVType;
if (NewFD->getTemplatedKind() == FunctionDecl::TK_FunctionTemplate)
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << FuncTemplates;
if (const auto *NewCXXFD = dyn_cast<CXXMethodDecl>(NewFD)) {
if (NewCXXFD->isVirtual())
return S.Diag(NewCXXFD->getLocation(),
diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << VirtFuncs;
if (const auto *NewCXXCtor = dyn_cast<CXXConstructorDecl>(NewFD))
return S.Diag(NewCXXCtor->getLocation(),
diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << Constructors;
if (const auto *NewCXXDtor = dyn_cast<CXXDestructorDecl>(NewFD))
return S.Diag(NewCXXDtor->getLocation(),
diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << Destructors;
}
if (NewFD->isDeleted())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DeletedFuncs;
if (NewFD->isDefaulted())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DefaultedFuncs;
if (NewFD->isConstexpr() && (MVType == MultiVersioning::CPUDispatch ||
MVType == MultiVersioning::CPUSpecific))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << ConstexprFuncs;
QualType NewQType = S.getASTContext().getCanonicalType(NewFD->getType());
const auto *NewType = cast<FunctionType>(NewQType);
QualType NewReturnType = NewType->getReturnType();
if (NewReturnType->isUndeducedType())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType << DeducedReturn;
// Only allow transition to MultiVersion if it hasn't been used.
if (OldFD && CausesMV && OldFD->isUsed(false))
return S.Diag(NewFD->getLocation(), diag::err_multiversion_after_used);
// Ensure the return type is identical.
if (OldFD) {
QualType OldQType = S.getASTContext().getCanonicalType(OldFD->getType());
const auto *OldType = cast<FunctionType>(OldQType);
FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
if (OldTypeInfo.getCC() != NewTypeInfo.getCC())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< CallingConv;
QualType OldReturnType = OldType->getReturnType();
if (OldReturnType != NewReturnType)
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< ReturnType;
if (OldFD->isConstexpr() != NewFD->isConstexpr())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< ConstexprSpec;
if (OldFD->isInlineSpecified() != NewFD->isInlineSpecified())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< InlineSpec;
if (OldFD->getStorageClass() != NewFD->getStorageClass())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< StorageClass;
if (OldFD->isExternC() != NewFD->isExternC())
return S.Diag(NewFD->getLocation(), diag::err_multiversion_diff)
<< Linkage;
if (S.CheckEquivalentExceptionSpec(
OldFD->getType()->getAs<FunctionProtoType>(), OldFD->getLocation(),
NewFD->getType()->getAs<FunctionProtoType>(), NewFD->getLocation()))
return true;
}
return false;
}
/// Check the validity of a multiversion function declaration that is the
/// first of its kind. Also marks the function itself as multiversioned.
///
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
MultiVersioning::Type MVType,
const TargetAttr *TA,
const CPUDispatchAttr *CPUDisp,
const CPUSpecificAttr *CPUSpec) {
assert(MVType != MultiVersioning::None &&
"Function lacks multiversion attribute");
// Target only causes MV if it is the default version; otherwise this is a
// normal function.
if (MVType == MultiVersioning::Target && !TA->isDefaultVersion())
return false;
if (MVType == MultiVersioning::Target && CheckMultiVersionValue(S, FD)) {
FD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, nullptr, FD, true, MVType)) {
FD->setInvalidDecl();
return true;
}
FD->setIsMultiVersion();
return false;
}
static bool CheckTargetCausesMultiVersioning(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD, const TargetAttr *NewTA,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
LookupResult &Previous) {
const auto *OldTA = OldFD->getAttr<TargetAttr>();
TargetAttr::ParsedTargetAttr NewParsed = NewTA->parse();
// Sort order doesn't matter, it just needs to be consistent.
llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
// If the old decl is NOT MultiVersioned yet, and we don't cause that
// to change, this is a simple redeclaration.
if (!OldTA || OldTA->getFeaturesStr() == NewTA->getFeaturesStr())
return false;
// Otherwise, this decl causes MultiVersioning.
if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, true,
MultiVersioning::Target)) {
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionValue(S, OldFD)) {
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
return true;
}
TargetAttr::ParsedTargetAttr OldParsed =
OldTA->parse(std::less<std::string>());
if (OldParsed == NewParsed) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
for (const auto *FD : OldFD->redecls()) {
const auto *CurTA = FD->getAttr<TargetAttr>();
if (!CurTA || CurTA->isInherited()) {
S.Diag(FD->getLocation(), diag::err_multiversion_required_in_redecl)
<< 0;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
return true;
}
}
OldFD->setIsMultiVersion();
NewFD->setIsMultiVersion();
Redeclaration = false;
MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
}
/// Check the validity of a new function declaration being added to an existing
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
MultiVersioning::Type NewMVType, const TargetAttr *NewTA,
const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
LookupResult &Previous) {
MultiVersioning::Type OldMVType = getMultiVersionType(OldFD);
// Disallow mixing of multiversioning types.
if ((OldMVType == MultiVersioning::Target &&
NewMVType != MultiVersioning::Target) ||
(NewMVType == MultiVersioning::Target &&
OldMVType != MultiVersioning::Target)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
TargetAttr::ParsedTargetAttr NewParsed;
if (NewTA) {
NewParsed = NewTA->parse();
llvm::sort(NewParsed.Features.begin(), NewParsed.Features.end());
}
bool UseMemberUsingDeclRules =
S.CurContext->isRecord() && !NewFD->getFriendObjectKind();
// Next, check ALL non-overloads to see if this is a redeclaration of a
// previous member of the MultiVersion set.
for (NamedDecl *ND : Previous) {
FunctionDecl *CurFD = ND->getAsFunction();
if (!CurFD)
continue;
if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
if (NewMVType == MultiVersioning::Target) {
const auto *CurTA = CurFD->getAttr<TargetAttr>();
if (CurTA->getFeaturesStr() == NewTA->getFeaturesStr()) {
NewFD->setIsMultiVersion();
Redeclaration = true;
OldDecl = ND;
return false;
}
TargetAttr::ParsedTargetAttr CurParsed =
CurTA->parse(std::less<std::string>());
if (CurParsed == NewParsed) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_duplicate);
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
} else {
const auto *CurCPUSpec = CurFD->getAttr<CPUSpecificAttr>();
const auto *CurCPUDisp = CurFD->getAttr<CPUDispatchAttr>();
// Handle CPUDispatch/CPUSpecific versions.
// Only one CPUDispatch function is allowed; this makes a second one go
// through the redeclaration errors.
if (NewMVType == MultiVersioning::CPUDispatch &&
CurFD->hasAttr<CPUDispatchAttr>()) {
if (CurCPUDisp->cpus_size() == NewCPUDisp->cpus_size() &&
std::equal(
CurCPUDisp->cpus_begin(), CurCPUDisp->cpus_end(),
NewCPUDisp->cpus_begin(),
[](const IdentifierInfo *Cur, const IdentifierInfo *New) {
return Cur->getName() == New->getName();
})) {
NewFD->setIsMultiVersion();
Redeclaration = true;
OldDecl = ND;
return false;
}
// If the declarations don't match, this is an error condition.
S.Diag(NewFD->getLocation(), diag::err_cpu_dispatch_mismatch);
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
if (NewMVType == MultiVersioning::CPUSpecific && CurCPUSpec) {
if (CurCPUSpec->cpus_size() == NewCPUSpec->cpus_size() &&
std::equal(
CurCPUSpec->cpus_begin(), CurCPUSpec->cpus_end(),
NewCPUSpec->cpus_begin(),
[](const IdentifierInfo *Cur, const IdentifierInfo *New) {
return Cur->getName() == New->getName();
})) {
NewFD->setIsMultiVersion();
Redeclaration = true;
OldDecl = ND;
return false;
}
// Only 1 version of CPUSpecific is allowed for each CPU.
for (const IdentifierInfo *CurII : CurCPUSpec->cpus()) {
for (const IdentifierInfo *NewII : NewCPUSpec->cpus()) {
if (CurII == NewII) {
S.Diag(NewFD->getLocation(), diag::err_cpu_specific_multiple_defs)
<< NewII;
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
}
}
}
// If the two decls aren't the same MVType, there is no possible error
// condition.
}
}
// Else, this is simply a non-redecl case. Checking the 'value' is only
// necessary in the Target case, since the CPUSpecific/Dispatch cases are
// handled in the attribute-adding step.
if (NewMVType == MultiVersioning::Target &&
CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, false, NewMVType)) {
NewFD->setInvalidDecl();
return true;
}
NewFD->setIsMultiVersion();
Redeclaration = false;
MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
}
/// Check the validity of a multiversion function declaration.
/// Also marks the function itself as multiversioned.
///
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
bool &Redeclaration, NamedDecl *&OldDecl,
bool &MergeTypeWithPrevious,
LookupResult &Previous) {
const auto *NewTA = NewFD->getAttr<TargetAttr>();
const auto *NewCPUDisp = NewFD->getAttr<CPUDispatchAttr>();
const auto *NewCPUSpec = NewFD->getAttr<CPUSpecificAttr>();
// Mixing Multiversioning types is prohibited.
if ((NewTA && NewCPUDisp) || (NewTA && NewCPUSpec) ||
(NewCPUDisp && NewCPUSpec)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
NewFD->setInvalidDecl();
return true;
}
MultiVersioning::Type MVType = getMultiVersionType(NewFD);
// Main isn't allowed to become a multiversion function; however, it IS
// permitted for 'main' to be marked with the 'target' optimization hint.
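// For example:
//   __attribute__((target("avx2"))) int main();      // OK: hint only
//   __attribute__((target("default"))) int main();   // error: multiversions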
if (NewFD->isMain()) {
if ((MVType == MultiVersioning::Target && NewTA->isDefaultVersion()) ||
MVType == MultiVersioning::CPUDispatch ||
MVType == MultiVersioning::CPUSpecific) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
NewFD->setInvalidDecl();
return true;
}
return false;
}
if (!OldDecl || !OldDecl->getAsFunction() ||
OldDecl->getDeclContext()->getRedeclContext() !=
NewFD->getDeclContext()->getRedeclContext()) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
if (MVType == MultiVersioning::None)
return false;
return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA, NewCPUDisp,
NewCPUSpec);
}
FunctionDecl *OldFD = OldDecl->getAsFunction();
if (!OldFD->isMultiVersion() && MVType == MultiVersioning::None)
return false;
if (OldFD->isMultiVersion() && MVType == MultiVersioning::None) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
<< (getMultiVersionType(OldFD) != MultiVersioning::Target);
NewFD->setInvalidDecl();
return true;
}
// Handle the case where the target attribute potentially causes
// multiversioning.
if (!OldFD->isMultiVersion() && MVType == MultiVersioning::Target)
return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
Redeclaration, OldDecl,
MergeTypeWithPrevious, Previous);
// Previous declarations lack CPUDispatch/CPUSpecific.
if (!OldFD->isMultiVersion()) {
S.Diag(OldFD->getLocation(), diag::err_multiversion_required_in_redecl)
<< 1;
S.Diag(NewFD->getLocation(), diag::note_multiversioning_caused_here);
NewFD->setInvalidDecl();
return true;
}
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Check that these are
// still compatible with previous declarations.
return CheckMultiVersionAdditionalDecl(
S, OldFD, NewFD, MVType, NewTA, NewCPUDisp, NewCPUSpec, Redeclaration,
OldDecl, MergeTypeWithPrevious, Previous);
}
/// Perform semantic checking of a new function declaration.
///
/// Performs semantic analysis of the new function declaration
/// NewFD. This routine performs all semantic checking that does not
/// require the actual declarator involved in the declaration, and is
/// used both for the declaration of functions as they are parsed
/// (called via ActOnDeclarator) and for the declaration of functions
/// that have been instantiated via C++ template instantiation (called
/// via InstantiateDecl).
///
/// \param IsMemberSpecialization whether this new function declaration is
/// a member specialization (that replaces any definition provided by the
/// previous declaration).
///
/// This sets NewFD->isInvalidDecl() to true if there was an error.
///
/// \returns true if the function declaration is a redeclaration.
bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
LookupResult &Previous,
bool IsMemberSpecialization) {
assert(!NewFD->getReturnType()->isVariablyModifiedType() &&
"Variably modified return types are not handled here");
// Determine whether the type of this function should be merged with
// a previous visible declaration. This never happens for functions in C++,
// and always happens in C if the previous declaration was visible.
bool MergeTypeWithPrevious = !getLangOpts().CPlusPlus &&
!Previous.isShadowed();
bool Redeclaration = false;
NamedDecl *OldDecl = nullptr;
bool MayNeedOverloadableChecks = false;
// Merge or overload the declaration with an existing declaration of
// the same name, if appropriate.
if (!Previous.empty()) {
// Determine whether NewFD is an overload of PrevDecl or
// a declaration that requires merging. If it's an overload,
// there's no more work to do here; we'll just add the new
// function to the scope.
if (!AllowOverloadingOfFunction(Previous, Context, NewFD)) {
NamedDecl *Candidate = Previous.getRepresentativeDecl();
if (shouldLinkPossiblyHiddenDecl(Candidate, NewFD)) {
Redeclaration = true;
OldDecl = Candidate;
}
} else {
MayNeedOverloadableChecks = true;
switch (CheckOverload(S, NewFD, Previous, OldDecl,
/*NewIsUsingDecl*/ false)) {
case Ovl_Match:
Redeclaration = true;
break;
case Ovl_NonFunction:
Redeclaration = true;
break;
case Ovl_Overload:
Redeclaration = false;
break;
}
}
}
// Check for a previous extern "C" declaration with this name.
if (!Redeclaration &&
checkForConflictWithNonVisibleExternC(*this, NewFD, Previous)) {
if (!Previous.empty()) {
// This is an extern "C" declaration with the same name as a previous
// declaration, and thus redeclares that entity...
Redeclaration = true;
OldDecl = Previous.getFoundDecl();
MergeTypeWithPrevious = false;
// ... except in the presence of __attribute__((overloadable)).
if (OldDecl->hasAttr<OverloadableAttr>() ||
NewFD->hasAttr<OverloadableAttr>()) {
if (IsOverload(NewFD, cast<FunctionDecl>(OldDecl), false)) {
MayNeedOverloadableChecks = true;
Redeclaration = false;
OldDecl = nullptr;
}
}
}
}
if (CheckMultiVersionFunction(*this, NewFD, Redeclaration, OldDecl,
MergeTypeWithPrevious, Previous))
return Redeclaration;
// C++11 [dcl.constexpr]p8:
// A constexpr specifier for a non-static member function that is not
// a constructor declares that member function to be const.
//
// This needs to be delayed until we know whether this is an out-of-line
// definition of a static member function.
//
// This rule is not present in C++1y, so we produce a backwards
// compatibility warning whenever it happens in C++11.
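// For example:
//   struct S { constexpr int f(); };   // f() is implicitly const in C++11,
//                                      // but not in C++14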
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
if (!getLangOpts().CPlusPlus14 && MD && MD->isConstexpr() &&
!MD->isStatic() && !isa<CXXConstructorDecl>(MD) &&
(MD->getTypeQualifiers() & Qualifiers::Const) == 0) {
CXXMethodDecl *OldMD = nullptr;
if (OldDecl)
OldMD = dyn_cast_or_null<CXXMethodDecl>(OldDecl->getAsFunction());
if (!OldMD || !OldMD->isStatic()) {
const FunctionProtoType *FPT =
MD->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
EPI.TypeQuals |= Qualifiers::Const;
MD->setType(Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI));
// Warn that we did this, if we're not performing template instantiation.
// In that case, we'll have warned already when the template was defined.
if (!inTemplateInstantiation()) {
SourceLocation AddConstLoc;
if (FunctionTypeLoc FTL = MD->getTypeSourceInfo()->getTypeLoc()
.IgnoreParens().getAs<FunctionTypeLoc>())
AddConstLoc = getLocForEndOfToken(FTL.getRParenLoc());
Diag(MD->getLocation(), diag::warn_cxx14_compat_constexpr_not_const)
<< FixItHint::CreateInsertion(AddConstLoc, " const");
}
}
}
if (Redeclaration) {
// NewFD and OldDecl represent declarations that need to be
// merged.
if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
Previous.clear();
Previous.addDecl(OldDecl);
if (FunctionTemplateDecl *OldTemplateDecl =
dyn_cast<FunctionTemplateDecl>(OldDecl)) {
auto *OldFD = OldTemplateDecl->getTemplatedDecl();
NewFD->setPreviousDeclaration(OldFD);
adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
FunctionTemplateDecl *NewTemplateDecl
= NewFD->getDescribedFunctionTemplate();
assert(NewTemplateDecl && "Template/non-template mismatch");
if (NewFD->isCXXClassMember()) {
NewFD->setAccess(OldTemplateDecl->getAccess());
NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
}
// If this is an explicit specialization of a member that is a function
// template, mark it as a member specialization.
if (IsMemberSpecialization &&
NewTemplateDecl->getInstantiatedFromMemberTemplate()) {
NewTemplateDecl->setMemberSpecialization();
assert(OldTemplateDecl->isMemberSpecialization());
// Explicit specializations of a member template do not inherit deleted
// status from the parent member template that they are specializing.
if (OldFD->isDeleted()) {
// FIXME: This assert will not hold in the presence of modules.
assert(OldFD->getCanonicalDecl() == OldFD);
// FIXME: We need an update record for this AST mutation.
OldFD->setDeletedAsWritten(false);
}
}
} else {
if (shouldLinkDependentDeclWithPrevious(NewFD, OldDecl)) {
auto *OldFD = cast<FunctionDecl>(OldDecl);
// This needs to happen first so that 'inline' propagates.
NewFD->setPreviousDeclaration(OldFD);
adjustDeclContextForDeclaratorDecl(NewFD, OldFD);
if (NewFD->isCXXClassMember())
NewFD->setAccess(OldFD->getAccess());
}
}
} else if (!getLangOpts().CPlusPlus && MayNeedOverloadableChecks &&
!NewFD->getAttr<OverloadableAttr>()) {
assert((Previous.empty() ||
llvm::any_of(Previous,
[](const NamedDecl *ND) {
return ND->hasAttr<OverloadableAttr>();
})) &&
"Non-redecls shouldn't happen without overloadable present");
auto OtherUnmarkedIter = llvm::find_if(Previous, [](const NamedDecl *ND) {
const auto *FD = dyn_cast<FunctionDecl>(ND);
return FD && !FD->hasAttr<OverloadableAttr>();
});
if (OtherUnmarkedIter != Previous.end()) {
Diag(NewFD->getLocation(),
diag::err_attribute_overloadable_multiple_unmarked_overloads);
Diag((*OtherUnmarkedIter)->getLocation(),
diag::note_attribute_overloadable_prev_overload)
<< false;
NewFD->addAttr(OverloadableAttr::CreateImplicit(Context));
}
}
// Semantic checking for this function declaration (in isolation).
if (getLangOpts().CPlusPlus) {
// C++-specific checks.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
CheckConstructor(Constructor);
} else if (CXXDestructorDecl *Destructor =
dyn_cast<CXXDestructorDecl>(NewFD)) {
CXXRecordDecl *Record = Destructor->getParent();
QualType ClassType = Context.getTypeDeclType(Record);
// FIXME: Shouldn't we be able to perform this check even when the class
// type is dependent? Both gcc and edg can handle that.
if (!ClassType->isDependentType()) {
DeclarationName Name
= Context.DeclarationNames.getCXXDestructorName(
Context.getCanonicalType(ClassType));
if (NewFD->getDeclName() != Name) {
Diag(NewFD->getLocation(), diag::err_destructor_name);
NewFD->setInvalidDecl();
return Redeclaration;
}
}
} else if (CXXConversionDecl *Conversion
= dyn_cast<CXXConversionDecl>(NewFD)) {
ActOnConversionDeclarator(Conversion);
} else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(NewFD)) {
if (auto *TD = Guide->getDescribedFunctionTemplate())
CheckDeductionGuideTemplate(TD);
// A deduction guide is not on the list of entities that can be
// explicitly specialized.
if (Guide->getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
Diag(Guide->getLocStart(), diag::err_deduction_guide_specialized)
<< /*explicit specialization*/ 1;
}
// Find any virtual functions that this function overrides.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD)) {
if (!Method->isFunctionTemplateSpecialization() &&
!Method->getDescribedFunctionTemplate() &&
Method->isCanonicalDecl()) {
if (AddOverriddenMethods(Method->getParent(), Method)) {
// If the function was marked as "static", we have a problem.
if (NewFD->getStorageClass() == SC_Static) {
ReportOverrides(*this, diag::err_static_overrides_virtual, Method);
}
}
}
if (Method->isStatic())
checkThisInStaticMemberFunctionType(Method);
}
// Extra checking for C++ overloaded operators (C++ [over.oper]).
if (NewFD->isOverloadedOperator() &&
CheckOverloadedOperatorDeclaration(NewFD)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
// Extra checking for C++0x literal operators (C++0x [over.literal]).
if (NewFD->getLiteralIdentifier() &&
CheckLiteralOperatorDeclaration(NewFD)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
// In C++, check default arguments now that we have merged decls. Unless
// the lexical context is the class, because in this case this is done
// during delayed parsing anyway.
if (!CurContext->isRecord())
CheckCXXDefaultArguments(NewFD);
// If this function declares a builtin function, check the type of this
// declaration against the expected type for the builtin.
if (unsigned BuiltinID = NewFD->getBuiltinID()) {
ASTContext::GetBuiltinTypeError Error;
LookupPredefedObjCSuperType(*this, S, NewFD->getIdentifier());
QualType T = Context.GetBuiltinType(BuiltinID, Error);
// If the type of the builtin differs only in its exception
// specification, that's OK.
// FIXME: If the types do differ in this way, it would be better to
// retain the 'noexcept' form of the type.
if (!T.isNull() &&
!Context.hasSameFunctionTypeIgnoringExceptionSpec(T,
NewFD->getType()))
// The type of this function differs from the type of the builtin,
// so forget about the builtin entirely.
Context.BuiltinInfo.forgetBuiltin(BuiltinID, Context.Idents);
}
// If this function is declared as being extern "C", then check to see if
// the function returns a UDT (class, struct, or union type) that is not C
// compatible, and if it does, warn the user.
// But, issue any diagnostic on the first declaration only.
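// For example (a sketch of input warned about here):
//   struct S { std::string str; };   // non-POD
//   extern "C" S f();                // warned: S is incompatible with C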
if (Previous.empty() && NewFD->isExternC()) {
QualType R = NewFD->getReturnType();
if (R->isIncompleteType() && !R->isVoidType())
Diag(NewFD->getLocation(), diag::warn_return_value_udt_incomplete)
<< NewFD << R;
else if (!R.isPODType(Context) && !R->isVoidType() &&
!R->isObjCObjectPointerType())
Diag(NewFD->getLocation(), diag::warn_return_value_udt) << NewFD << R;
}
// C++1z [dcl.fct]p6:
// [...] whether the function has a non-throwing exception-specification
// [is] part of the function type
//
// This results in an ABI break between C++14 and C++17 for functions whose
// declared type includes an exception-specification in a parameter or
// return type. (Exception specifications on the function itself are OK in
// most cases, and exception specifications are not permitted in most other
// contexts where they could make it into a mangling.)
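// For example, when compiled below C++17:
//   void f(void (*p)() noexcept);   // p's type mangles differently in C++17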
if (!getLangOpts().CPlusPlus17 && !NewFD->getPrimaryTemplate()) {
auto HasNoexcept = [&](QualType T) -> bool {
// Strip off declarator chunks that could be between us and a function
// type. We don't need to look far, exception specifications are very
// restricted prior to C++17.
if (auto *RT = T->getAs<ReferenceType>())
T = RT->getPointeeType();
else if (T->isAnyPointerType())
T = T->getPointeeType();
else if (auto *MPT = T->getAs<MemberPointerType>())
T = MPT->getPointeeType();
if (auto *FPT = T->getAs<FunctionProtoType>())
if (FPT->isNothrow())
return true;
return false;
};
auto *FPT = NewFD->getType()->castAs<FunctionProtoType>();
bool AnyNoexcept = HasNoexcept(FPT->getReturnType());
for (QualType T : FPT->param_types())
AnyNoexcept |= HasNoexcept(T);
if (AnyNoexcept)
Diag(NewFD->getLocation(),
diag::warn_cxx17_compat_exception_spec_in_signature)
<< NewFD;
}
if (!Redeclaration && LangOpts.CUDA)
checkCUDATargetOverload(NewFD, Previous);
}
return Redeclaration;
}
void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
// C++11 [basic.start.main]p3:
// A program that [...] declares main to be inline, static or
// constexpr is ill-formed.
// C11 6.7.4p4: In a hosted environment, no function specifier(s) shall
// appear in a declaration of main.
// static main is not an error under C99, but we should warn about it.
// We accept _Noreturn main as an extension.
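// For example:
//   static int main(void);      // warning in C99, error in C++
//   inline int main(void);      // error
//   _Noreturn int main(void);   // accepted with an extension warning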
if (FD->getStorageClass() == SC_Static)
Diag(DS.getStorageClassSpecLoc(), getLangOpts().CPlusPlus
? diag::err_static_main : diag::warn_static_main)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
if (FD->isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_main)
<< FixItHint::CreateRemoval(DS.getInlineSpecLoc());
if (DS.isNoreturnSpecified()) {
SourceLocation NoreturnLoc = DS.getNoreturnSpecLoc();
SourceRange NoreturnRange(NoreturnLoc, getLocForEndOfToken(NoreturnLoc));
Diag(NoreturnLoc, diag::ext_noreturn_main);
Diag(NoreturnLoc, diag::note_main_remove_noreturn)
<< FixItHint::CreateRemoval(NoreturnRange);
}
if (FD->isConstexpr()) {
Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main)
<< FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
FD->setConstexpr(false);
}
if (getLangOpts().OpenCL) {
Diag(FD->getLocation(), diag::err_opencl_no_main)
<< FD->hasAttr<OpenCLKernelAttr>();
FD->setInvalidDecl();
return;
}
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
const FunctionType* FT = T->castAs<FunctionType>();
// Set default calling convention for main()
if (FT->getCallConv() != CC_C) {
FT = Context.adjustFunctionType(FT, FT->getExtInfo().withCallingConv(CC_C));
FD->setType(QualType(FT, 0));
T = Context.getCanonicalType(FD->getType());
}
if (getLangOpts().GNUMode && !getLangOpts().CPlusPlus) {
// In C with GNU extensions we allow main() to have non-integer return
// type, but we should warn about the extension, and we disable the
// implicit-return-zero rule.
// GCC in C mode accepts qualified 'int'.
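// For example, in GNU C mode:
//   void main(void) {}   // accepted with a warning; no implicit 'return 0'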
if (Context.hasSameUnqualifiedType(FT->getReturnType(), Context.IntTy))
FD->setHasImplicitReturnZero(true);
else {
Diag(FD->getTypeSpecStartLoc(), diag::ext_main_returns_nonint);
SourceRange RTRange = FD->getReturnTypeSourceRange();
if (RTRange.isValid())
Diag(RTRange.getBegin(), diag::note_main_change_return_type)
<< FixItHint::CreateReplacement(RTRange, "int");
}
} else {
// In C and C++, main magically returns 0 if you fall off the end;
// set the flag which tells us that.
// This is C++ [basic.start.main]p5 and C99 5.1.2.2.3.
// All the standards say that main() should return 'int'.
if (Context.hasSameType(FT->getReturnType(), Context.IntTy))
FD->setHasImplicitReturnZero(true);
else {
// Otherwise, this is just a flat-out error.
SourceRange RTRange = FD->getReturnTypeSourceRange();
Diag(FD->getTypeSpecStartLoc(), diag::err_main_returns_nonint)
<< (RTRange.isValid() ? FixItHint::CreateReplacement(RTRange, "int")
: FixItHint());
FD->setInvalidDecl(true);
}
}
// Treat protoless main() as nullary.
if (isa<FunctionNoProtoType>(FT)) return;
const FunctionProtoType* FTP = cast<const FunctionProtoType>(FT);
unsigned nparams = FTP->getNumParams();
assert(FD->getNumParams() == nparams);
bool HasExtraParameters = (nparams > 3);
if (FTP->isVariadic()) {
Diag(FD->getLocation(), diag::ext_variadic_main);
// FIXME: if we had information about the location of the ellipsis, we
// could add a FixIt hint to remove it as a parameter.
}
// Darwin passes an undocumented fourth argument of type char**. If
// other platforms start sprouting these, the logic below will start
// getting shifty.
if (nparams == 4 && Context.getTargetInfo().getTriple().isOSDarwin())
HasExtraParameters = false;
if (HasExtraParameters) {
Diag(FD->getLocation(), diag::err_main_surplus_args) << nparams;
FD->setInvalidDecl(true);
nparams = 3;
}
// FIXME: a lot of the following diagnostics would be improved
// if we had some location information about types.
QualType CharPP =
Context.getPointerType(Context.getPointerType(Context.CharTy));
QualType Expected[] = { Context.IntTy, CharPP, CharPP, CharPP };
for (unsigned i = 0; i < nparams; ++i) {
QualType AT = FTP->getParamType(i);
bool mismatch = true;
if (Context.hasSameUnqualifiedType(AT, Expected[i]))
mismatch = false;
else if (Expected[i] == CharPP) {
// As an extension, the following forms are okay:
// char const **
// char const * const *
// char * const *
QualifierCollector qs;
const PointerType* PT;
if ((PT = qs.strip(AT)->getAs<PointerType>()) &&
(PT = qs.strip(PT->getPointeeType())->getAs<PointerType>()) &&
Context.hasSameType(QualType(qs.strip(PT->getPointeeType()), 0),
Context.CharTy)) {
qs.removeConst();
mismatch = !qs.empty();
}
}
if (mismatch) {
Diag(FD->getLocation(), diag::err_main_arg_wrong) << i << Expected[i];
// TODO: suggest replacing given type with expected type
FD->setInvalidDecl(true);
}
}
if (nparams == 1 && !FD->isInvalidDecl()) {
Diag(FD->getLocation(), diag::warn_main_one_arg);
}
if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
FD->setInvalidDecl();
}
}
void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
const FunctionType *FT = T->castAs<FunctionType>();
// Set an implicit return of 'zero' if the function can return some integral,
// enumeration, pointer or nullptr type.
if (FT->getReturnType()->isIntegralOrEnumerationType() ||
FT->getReturnType()->isAnyPointerType() ||
FT->getReturnType()->isNullPtrType())
// DllMain is exempt because a return value of zero means it failed.
if (FD->getName() != "DllMain")
FD->setHasImplicitReturnZero(true);
if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
FD->setInvalidDecl();
}
}
bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
// FIXME: Need strict checking. In C89, we need to check for
// any assignment, increment, decrement, function-calls, or
// commas outside of a sizeof. In C99, it's the same list,
// except that the aforementioned are allowed in unevaluated
// expressions. Everything else falls under the
// "may accept other forms of constant expressions" exception.
// (We never end up here for C++, so the constant expression
// rules there don't matter.)
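// For example, at file scope in C:
//   int x = 5;
//   int y = x;   // error: initializer element is not a constant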
const Expr *Culprit;
if (Init->isConstantInitializer(Context, false, &Culprit))
return false;
Diag(Culprit->getExprLoc(), diag::err_init_element_not_constant)
<< Culprit->getSourceRange();
return true;
}
namespace {
// Visits an initialization expression to see if OrigDecl is evaluated in
// its own initialization and emits a warning if it is.
class SelfReferenceChecker
: public EvaluatedExprVisitor<SelfReferenceChecker> {
Sema &S;
Decl *OrigDecl;
bool isRecordType;
bool isPODType;
bool isReferenceType;
bool isInitList;
llvm::SmallVector<unsigned, 4> InitFieldIndex;
public:
typedef EvaluatedExprVisitor<SelfReferenceChecker> Inherited;
SelfReferenceChecker(Sema &S, Decl *OrigDecl) : Inherited(S.Context),
S(S), OrigDecl(OrigDecl) {
isPODType = false;
isRecordType = false;
isReferenceType = false;
isInitList = false;
if (ValueDecl *VD = dyn_cast<ValueDecl>(OrigDecl)) {
isPODType = VD->getType().isPODType(S.Context);
isRecordType = VD->getType()->isRecordType();
isReferenceType = VD->getType()->isReferenceType();
}
}
// For most expressions, just call the visitor. For initializer lists,
// track the index of the field being initialized since fields are
// initialized in order, allowing use of previously initialized fields.
void CheckExpr(Expr *E) {
InitListExpr *InitList = dyn_cast<InitListExpr>(E);
if (!InitList) {
Visit(E);
return;
}
// Track and increment the index here.
isInitList = true;
InitFieldIndex.push_back(0);
for (auto Child : InitList->children()) {
CheckExpr(cast<Expr>(Child));
++InitFieldIndex.back();
}
InitFieldIndex.pop_back();
}
// Returns true if MemberExpr is checked and no further checking is needed.
// Returns false if additional checking is required.
bool CheckInitListMemberExpr(MemberExpr *E, bool CheckReference) {
llvm::SmallVector<FieldDecl*, 4> Fields;
Expr *Base = E;
bool ReferenceField = false;
// Get the field members used.
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
if (!FD)
return false;
Fields.push_back(FD);
if (FD->getType()->isReferenceType())
ReferenceField = true;
Base = ME->getBase()->IgnoreParenImpCasts();
}
// Keep checking only if the base Decl is the same.
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base);
if (!DRE || DRE->getDecl() != OrigDecl)
return false;
// A reference field can be bound to an uninitialized field.
if (CheckReference && !ReferenceField)
return true;
// Convert FieldDecls to their index number.
llvm::SmallVector<unsigned, 4> UsedFieldIndex;
for (const FieldDecl *I : llvm::reverse(Fields))
UsedFieldIndex.push_back(I->getFieldIndex());
// See if a warning is needed by checking the first difference in index
// numbers. If the field being used has a lower index than the field being
// initialized, then the use is safe.
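// e.g. (illustrative) "struct A { int x, y; } a = { 1, a.x };" reads a.x
// only after that field has been initialized, so no warning is issued.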
for (auto UsedIter = UsedFieldIndex.begin(),
UsedEnd = UsedFieldIndex.end(),
OrigIter = InitFieldIndex.begin(),
OrigEnd = InitFieldIndex.end();
UsedIter != UsedEnd && OrigIter != OrigEnd; ++UsedIter, ++OrigIter) {
if (*UsedIter < *OrigIter)
return true;
if (*UsedIter > *OrigIter)
break;
}
// TODO: Add a different warning which will print the field names.
HandleDeclRefExpr(DRE);
return true;
}
// For most expressions, the cast is directly above the DeclRefExpr.
// For conditional operators, the cast can be outside the conditional
// operator if both expressions are DeclRefExprs.
void HandleValue(Expr *E) {
E = E->IgnoreParens();
if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(E)) {
HandleDeclRefExpr(DRE);
return;
}
if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
Visit(CO->getCond());
HandleValue(CO->getTrueExpr());
HandleValue(CO->getFalseExpr());
return;
}
if (BinaryConditionalOperator *BCO =
dyn_cast<BinaryConditionalOperator>(E)) {
Visit(BCO->getCond());
HandleValue(BCO->getFalseExpr());
return;
}
if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
HandleValue(OVE->getSourceExpr());
return;
}
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_Comma) {
Visit(BO->getLHS());
HandleValue(BO->getRHS());
return;
}
}
if (isa<MemberExpr>(E)) {
if (isInitList) {
if (CheckInitListMemberExpr(cast<MemberExpr>(E),
false /*CheckReference*/))
return;
}
Expr *Base = E->IgnoreParenImpCasts();
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
// Check for static member variables and don't warn on them.
if (!isa<FieldDecl>(ME->getMemberDecl()))
return;
Base = ME->getBase()->IgnoreParenImpCasts();
}
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base))
HandleDeclRefExpr(DRE);
return;
}
Visit(E);
}
// Reference types not handled in HandleValue are handled here since all
// uses of references are bad, not just r-value uses.
void VisitDeclRefExpr(DeclRefExpr *E) {
if (isReferenceType)
HandleDeclRefExpr(E);
}
void VisitImplicitCastExpr(ImplicitCastExpr *E) {
if (E->getCastKind() == CK_LValueToRValue) {
HandleValue(E->getSubExpr());
return;
}
Inherited::VisitImplicitCastExpr(E);
}
void VisitMemberExpr(MemberExpr *E) {
if (isInitList) {
if (CheckInitListMemberExpr(E, true /*CheckReference*/))
return;
}
// Don't warn on arrays since they can be treated as pointers.
if (E->getType()->canDecayToPointerType()) return;
// Warn when a non-static method call is followed by non-static member
// field accesses, which is followed by a DeclRefExpr.
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl());
bool Warn = (MD && !MD->isStatic());
Expr *Base = E->getBase()->IgnoreParenImpCasts();
while (MemberExpr *ME = dyn_cast<MemberExpr>(Base)) {
if (!isa<FieldDecl>(ME->getMemberDecl()))
Warn = false;
Base = ME->getBase()->IgnoreParenImpCasts();
}
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (Warn)
HandleDeclRefExpr(DRE);
return;
}
// The base of a MemberExpr is not a MemberExpr or a DeclRefExpr.
// Visit that expression.
Visit(Base);
}
void VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
Expr *Callee = E->getCallee();
if (isa<UnresolvedLookupExpr>(Callee))
return Inherited::VisitCXXOperatorCallExpr(E);
Visit(Callee);
for (auto Arg: E->arguments())
HandleValue(Arg->IgnoreParenImpCasts());
}
void VisitUnaryOperator(UnaryOperator *E) {
// For POD record types, addresses of their own members are well-defined.
if (E->getOpcode() == UO_AddrOf && isRecordType &&
isa<MemberExpr>(E->getSubExpr()->IgnoreParens())) {
if (!isPODType)
HandleValue(E->getSubExpr());
return;
}
if (E->isIncrementDecrementOp()) {
HandleValue(E->getSubExpr());
return;
}
Inherited::VisitUnaryOperator(E);
}
void VisitObjCMessageExpr(ObjCMessageExpr *E) {}
void VisitCXXConstructExpr(CXXConstructExpr *E) {
if (E->getConstructor()->isCopyConstructor()) {
Expr *ArgExpr = E->getArg(0);
if (InitListExpr *ILE = dyn_cast<InitListExpr>(ArgExpr))
if (ILE->getNumInits() == 1)
ArgExpr = ILE->getInit(0);
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
if (ICE->getCastKind() == CK_NoOp)
ArgExpr = ICE->getSubExpr();
HandleValue(ArgExpr);
return;
}
Inherited::VisitCXXConstructExpr(E);
}
void VisitCallExpr(CallExpr *E) {
// Treat std::move as a use.
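// e.g. (illustrative) "std::string s = std::move(s);" reads 's' before it
// has been initialized.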
if (E->isCallToStdMove()) {
HandleValue(E->getArg(0));
return;
}
Inherited::VisitCallExpr(E);
}
void VisitBinaryOperator(BinaryOperator *E) {
if (E->isCompoundAssignmentOp()) {
HandleValue(E->getLHS());
Visit(E->getRHS());
return;
}
Inherited::VisitBinaryOperator(E);
}
// A custom visitor for BinaryConditionalOperator is needed because the
// regular visitor would check the condition and true expression separately,
// but both point to the same place, producing duplicate diagnostics.
void VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
Visit(E->getCond());
Visit(E->getFalseExpr());
}
void HandleDeclRefExpr(DeclRefExpr *DRE) {
Decl* ReferenceDecl = DRE->getDecl();
if (OrigDecl != ReferenceDecl) return;
unsigned diag;
if (isReferenceType) {
diag = diag::warn_uninit_self_reference_in_reference_init;
} else if (cast<VarDecl>(OrigDecl)->isStaticLocal()) {
diag = diag::warn_static_self_reference_in_init;
} else if (isa<TranslationUnitDecl>(OrigDecl->getDeclContext()) ||
isa<NamespaceDecl>(OrigDecl->getDeclContext()) ||
DRE->getDecl()->getType()->isRecordType()) {
diag = diag::warn_uninit_self_reference_in_init;
} else {
// Local variables will be handled by the CFG analysis.
return;
}
S.DiagRuntimeBehavior(DRE->getLocStart(), DRE,
S.PDiag(diag)
<< DRE->getDecl()
<< OrigDecl->getLocation()
<< DRE->getSourceRange());
}
};
/// CheckSelfReference - Warns if OrigDecl is used in expression E.
static void CheckSelfReference(Sema &S, Decl* OrigDecl, Expr *E,
bool DirectInit) {
// Parameter arguments are occasionally constructed from themselves,
// for instance, in recursive functions. Skip them.
if (isa<ParmVarDecl>(OrigDecl))
return;
E = E->IgnoreParens();
// Skip checking T a = a where T is not a record or reference type.
// Doing so is a way to silence uninitialized warnings.
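// e.g. "int x = x;" is a traditional idiom for suppressing -Wuninitialized.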
if (!DirectInit && !cast<VarDecl>(OrigDecl)->getType()->isRecordType())
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
if (ICE->getCastKind() == CK_LValueToRValue)
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr()))
if (DRE->getDecl() == OrigDecl)
return;
SelfReferenceChecker(S, OrigDecl).CheckExpr(E);
}
} // end anonymous namespace
namespace {
// Simple wrapper to add the name of a variable or (if no variable is
// available) a DeclarationName into a diagnostic.
struct VarDeclOrName {
VarDecl *VDecl;
DeclarationName Name;
friend const Sema::SemaDiagnosticBuilder &
operator<<(const Sema::SemaDiagnosticBuilder &Diag, VarDeclOrName VN) {
return VN.VDecl ? Diag << VN.VDecl : Diag << VN.Name;
}
};
} // end anonymous namespace
QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
DeclarationName Name, QualType Type,
TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init) {
bool IsInitCapture = !VDecl;
assert((!VDecl || !VDecl->isInitCapture()) &&
"init captures are expected to be deduced prior to initialization");
VarDeclOrName VN{VDecl, Name};
DeducedType *Deduced = Type->getContainedDeducedType();
assert(Deduced && "deduceVarTypeFromInitializer for non-deduced type");
// C++11 [dcl.spec.auto]p3
if (!Init) {
assert(VDecl && "no init for init capture deduction?");
// Except for class template argument deduction, and then only for an
// initializing declaration, i.e. not static at class scope or extern.
if (!isa<DeducedTemplateSpecializationType>(Deduced) ||
VDecl->hasExternalStorage() ||
VDecl->isStaticDataMember()) {
Diag(VDecl->getLocation(), diag::err_auto_var_requires_init)
<< VDecl->getDeclName() << Type;
return QualType();
}
}
ArrayRef<Expr*> DeduceInits;
if (Init)
DeduceInits = Init;
if (DirectInit) {
if (auto *PL = dyn_cast_or_null<ParenListExpr>(Init))
DeduceInits = PL->exprs();
}
if (isa<DeducedTemplateSpecializationType>(Deduced)) {
assert(VDecl && "non-auto type for init capture deduction?");
InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
InitializationKind Kind = InitializationKind::CreateForInit(
VDecl->getLocation(), DirectInit, Init);
// FIXME: Initialization should not be taking a mutable list of inits.
SmallVector<Expr*, 8> InitsCopy(DeduceInits.begin(), DeduceInits.end());
return DeduceTemplateSpecializationFromInitializer(TSI, Entity, Kind,
InitsCopy);
}
if (DirectInit) {
if (auto *IL = dyn_cast<InitListExpr>(Init))
DeduceInits = IL->inits();
}
// Deduction only works if we have exactly one source expression.
if (DeduceInits.empty()) {
// It isn't possible to write this directly, but it is possible to
// end up in this situation with "auto x(some_pack...);"
Diag(Init->getLocStart(), IsInitCapture
? diag::err_init_capture_no_expression
: diag::err_auto_var_init_no_expression)
<< VN << Type << Range;
return QualType();
}
if (DeduceInits.size() > 1) {
Diag(DeduceInits[1]->getLocStart(),
IsInitCapture ? diag::err_init_capture_multiple_expressions
: diag::err_auto_var_init_multiple_expressions)
<< VN << Type << Range;
return QualType();
}
Expr *DeduceInit = DeduceInits[0];
if (DirectInit && isa<InitListExpr>(DeduceInit)) {
Diag(Init->getLocStart(), IsInitCapture
? diag::err_init_capture_paren_braces
: diag::err_auto_var_init_paren_braces)
<< isa<InitListExpr>(Init) << VN << Type << Range;
return QualType();
}
// Expressions default to 'id' when we're in a debugger.
bool DefaultedAnyToId = false;
if (getLangOpts().DebuggerCastResultToId &&
Init->getType() == Context.UnknownAnyTy && !IsInitCapture) {
ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
if (Result.isInvalid()) {
return QualType();
}
Init = Result.get();
DefaultedAnyToId = true;
}
// C++ [dcl.decomp]p1:
// If the assignment-expression [...] has array type A and no ref-qualifier
// is present, e has type cv A
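// e.g. (illustrative) for "int arr[2]; auto [a, b] = arr;" the invented
// variable e has type "int[2]" rather than decaying to "int *".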
if (VDecl && isa<DecompositionDecl>(VDecl) &&
Context.hasSameUnqualifiedType(Type, Context.getAutoDeductType()) &&
DeduceInit->getType()->isConstantArrayType())
return Context.getQualifiedType(DeduceInit->getType(),
Type.getQualifiers());
QualType DeducedType;
if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) {
if (!IsInitCapture)
DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
else if (isa<InitListExpr>(Init))
Diag(Range.getBegin(),
diag::err_init_capture_deduction_failure_from_init_list)
<< VN
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
else
Diag(Range.getBegin(), diag::err_init_capture_deduction_failure)
<< VN << TSI->getType()
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
}
// Warn if we deduced 'id'. 'auto' usually implies type-safety, but using
// 'id' instead of a specific object type prevents most of our usual
// checks.
// We only want to warn outside of template instantiations, though:
// inside a template, the 'id' could have come from a parameter.
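// e.g. (illustrative) given "id v;", "auto x = v;" deduces 'id' for x
// and warns.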
if (!inTemplateInstantiation() && !DefaultedAnyToId && !IsInitCapture &&
!DeducedType.isNull() && DeducedType->isObjCIdType()) {
SourceLocation Loc = TSI->getTypeLoc().getBeginLoc();
Diag(Loc, diag::warn_auto_var_is_id) << VN << Range;
}
return DeducedType;
}
bool Sema::DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init) {
QualType DeducedType = deduceVarTypeFromInitializer(
VDecl, VDecl->getDeclName(), VDecl->getType(), VDecl->getTypeSourceInfo(),
VDecl->getSourceRange(), DirectInit, Init);
if (DeducedType.isNull()) {
VDecl->setInvalidDecl();
return true;
}
VDecl->setType(DeducedType);
assert(VDecl->isLinkageValid());
// In ARC, infer lifetime.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
VDecl->setInvalidDecl();
// If this is a redeclaration, check that the type we just deduced matches
// the previously declared type.
if (VarDecl *Old = VDecl->getPreviousDecl()) {
// We never need to merge the type, because we cannot form an incomplete
// array of auto, nor deduce such a type.
MergeVarDeclTypes(VDecl, Old, /*MergeTypeWithPrevious*/ false);
}
// Check the deduced type is valid for a variable declaration.
CheckVariableDeclarationType(VDecl);
return VDecl->isInvalidDecl();
}
/// AddInitializerToDecl - Adds the initializer Init to the
/// declaration dcl. If DirectInit is true, this is C++ direct
/// initialization rather than copy initialization.
void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// If there is no declaration, there was an error parsing it. Just ignore
// the initializer.
if (!RealDecl || RealDecl->isInvalidDecl()) {
CorrectDelayedTyposInExpr(Init, dyn_cast_or_null<VarDecl>(RealDecl));
return;
}
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
// Pure-specifiers are handled in ActOnPureSpecifier.
Diag(Method->getLocation(), diag::err_member_function_initialization)
<< Method->getDeclName() << Init->getSourceRange();
Method->setInvalidDecl();
return;
}
VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
if (!VDecl) {
assert(!isa<FieldDecl>(RealDecl) && "field init shouldn't get here");
Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
RealDecl->setInvalidDecl();
return;
}
// C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
if (VDecl->getType()->isUndeducedType()) {
// Attempt typo correction early so that the type of the init expression can
// be deduced based on the chosen correction if the original init contains a
// TypoExpr.
ExprResult Res = CorrectDelayedTyposInExpr(Init, VDecl);
if (!Res.isUsable()) {
RealDecl->setInvalidDecl();
return;
}
Init = Res.get();
if (DeduceVariableDeclarationType(VDecl, DirectInit, Init))
return;
}
// dllimport cannot be used on variable definitions.
if (VDecl->hasAttr<DLLImportAttr>() && !VDecl->isStaticDataMember()) {
Diag(VDecl->getLocation(), diag::err_attribute_dllimport_data_definition);
VDecl->setInvalidDecl();
return;
}
if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) {
// C99 6.7.8p5. C++ has no such restriction, but that is a defect.
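// e.g. inside a function body, "extern int n = 1;" is diagnosed.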
Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
VDecl->setInvalidDecl();
return;
}
if (!VDecl->getType()->isDependentType()) {
// A definition must end up with a complete type, which means it must be
// complete with the restriction that an array type might be completed by
// the initializer; note that later code assumes this restriction.
QualType BaseDeclType = VDecl->getType();
if (const ArrayType *Array = Context.getAsIncompleteArrayType(BaseDeclType))
BaseDeclType = Array->getElementType();
if (RequireCompleteType(VDecl->getLocation(), BaseDeclType,
diag::err_typecheck_decl_incomplete_type)) {
RealDecl->setInvalidDecl();
return;
}
// The variable cannot have an abstract class type.
if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(),
diag::err_abstract_type_in_decl,
AbstractVariableType))
VDecl->setInvalidDecl();
}
// If adding the initializer will turn this declaration into a definition,
// and we already have a definition for this variable, diagnose or otherwise
// handle the situation.
VarDecl *Def;
if ((Def = VDecl->getDefinition()) && Def != VDecl &&
(!VDecl->isStaticDataMember() || VDecl->isOutOfLine()) &&
!VDecl->isThisDeclarationADemotedDefinition() &&
checkVarDeclRedefinition(Def, VDecl))
return;
if (getLangOpts().CPlusPlus) {
// C++ [class.static.data]p4
// If a static data member is of const integral or const
// enumeration type, its declaration in the class definition can
// specify a constant-initializer which shall be an integral
// constant expression (5.19). In that case, the member can appear
// in integral constant expressions. The member shall still be
// defined in a namespace scope if it is used in the program and the
// namespace scope definition shall not contain an initializer.
//
// We already performed a redefinition check above, but for static
// data members we also need to check whether there was an in-class
// declaration with an initializer.
if (VDecl->isStaticDataMember() && VDecl->getCanonicalDecl()->hasInit()) {
Diag(Init->getExprLoc(), diag::err_static_data_member_reinitialization)
<< VDecl->getDeclName();
Diag(VDecl->getCanonicalDecl()->getInit()->getExprLoc(),
diag::note_previous_initializer)
<< 0;
return;
}
if (VDecl->hasLocalStorage())
setFunctionHasBranchProtectedScope();
if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer)) {
VDecl->setInvalidDecl();
return;
}
}
// OpenCL 1.1 6.5.2: "Variables allocated in the __local address space inside
// a kernel function cannot be initialized."
if (VDecl->getType().getAddressSpace() == LangAS::opencl_local) {
Diag(VDecl->getLocation(), diag::err_local_cant_init);
VDecl->setInvalidDecl();
return;
}
// Get the decl's type and save a reference for later, since
// CheckInitializerTypes may change it.
QualType DclT = VDecl->getType(), SavT = DclT;
// Expressions default to 'id' when we're in a debugger
// and we are assigning it to a variable of Objective-C pointer type.
if (getLangOpts().DebuggerCastResultToId && DclT->isObjCObjectPointerType() &&
Init->getType() == Context.UnknownAnyTy) {
ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
}
Init = Result.get();
}
// Perform the initialization.
ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
if (!VDecl->isInvalidDecl()) {
InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
InitializationKind Kind = InitializationKind::CreateForInit(
VDecl->getLocation(), DirectInit, Init);
MultiExprArg Args = Init;
if (CXXDirectInit)
Args = MultiExprArg(CXXDirectInit->getExprs(),
CXXDirectInit->getNumExprs());
// Try to correct any TypoExprs in the initialization arguments.
for (size_t Idx = 0; Idx < Args.size(); ++Idx) {
ExprResult Res = CorrectDelayedTyposInExpr(
Args[Idx], VDecl, [this, Entity, Kind](Expr *E) {
InitializationSequence Init(*this, Entity, Kind, MultiExprArg(E));
return Init.Failed() ? ExprError() : E;
});
if (Res.isInvalid()) {
VDecl->setInvalidDecl();
} else if (Res.get() != Args[Idx]) {
Args[Idx] = Res.get();
}
}
if (VDecl->isInvalidDecl())
return;
InitializationSequence InitSeq(*this, Entity, Kind, Args,
/*TopLevelOfInitList=*/false,
/*TreatUnavailableAsInvalid=*/false);
ExprResult Result = InitSeq.Perform(*this, Entity, Kind, Args, &DclT);
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
}
Init = Result.getAs<Expr>();
}
// Check for self-references within variable initializers.
// Variables declared within a function/method body (except for references)
// are handled by a dataflow analysis.
if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() ||
VDecl->getType()->isReferenceType()) {
CheckSelfReference(*this, RealDecl, Init, DirectInit);
}
// If the type changed, it means we had an incomplete type that was
// completed by the initializer. For example:
// int ary[] = { 1, 3, 5 };
// "ary" transitions from an IncompleteArrayType to a ConstantArrayType.
if (!VDecl->isInvalidDecl() && (DclT != SavT))
VDecl->setType(DclT);
if (!VDecl->isInvalidDecl()) {
checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
if (VDecl->hasAttr<BlocksAttr>())
checkRetainCycles(VDecl, Init);
// It is safe to assign a weak reference into a strong variable.
// Although this code can still have problems:
// id x = self.weakProp;
// id y = self.weakProp;
// we do not want to warn spuriously when 'x' and 'y' are on separate
// paths through the function. This should be revisited if
// -Wrepeated-use-of-weak is made flow-sensitive.
if (FunctionScopeInfo *FSI = getCurFunction())
if ((VDecl->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
VDecl->getType().isNonWeakInMRRWithObjCWeak(Context)) &&
!Diags.isIgnored(diag::warn_arc_repeated_use_of_weak,
Init->getLocStart()))
FSI->markSafeWeakUse(Init);
}
// The initialization is usually a full-expression.
//
// FIXME: If this is a braced initialization of an aggregate, it is not
// an expression, and each individual field initializer is a separate
// full-expression. For instance, in:
//
// struct Temp { ~Temp(); };
// struct S { S(Temp); };
// struct T { S a, b; } t = { Temp(), Temp() };
//
// we should destroy the first Temp before constructing the second.
ExprResult Result = ActOnFinishFullExpr(Init, VDecl->getLocation(),
false,
VDecl->isConstexpr());
if (Result.isInvalid()) {
VDecl->setInvalidDecl();
return;
}
Init = Result.get();
// Attach the initializer to the decl.
VDecl->setInit(Init);
if (VDecl->isLocalVarDecl()) {
// Don't check the initializer if the declaration is malformed.
if (VDecl->isInvalidDecl()) {
// do nothing
// OpenCL v1.2 s6.5.3: __constant locals must be constant-initialized.
// This is true even in OpenCL C++.
} else if (VDecl->getType().getAddressSpace() == LangAS::opencl_constant) {
CheckForConstantInitializer(Init, DclT);
// Otherwise, C++ does not restrict the initializer.
} else if (getLangOpts().CPlusPlus) {
// do nothing
// C99 6.7.8p4: All the expressions in an initializer for an object that has
// static storage duration shall be constant expressions or string literals.
} else if (VDecl->getStorageClass() == SC_Static) {
CheckForConstantInitializer(Init, DclT);
// C89 is stricter than C99 for aggregate initializers.
// C89 6.5.7p3: All the expressions [...] in an initializer list
// for an object that has aggregate or union type shall be
// constant expressions.
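// e.g. (illustrative) in C89 mode, "int n = 0; int a[1] = { n };" at
// block scope is accepted with only an extension warning.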
} else if (!getLangOpts().C99 && VDecl->getType()->isAggregateType() &&
isa<InitListExpr>(Init)) {
const Expr *Culprit;
if (!Init->isConstantInitializer(Context, false, &Culprit)) {
Diag(Culprit->getExprLoc(),
diag::ext_aggregate_init_not_constant)
<< Culprit->getSourceRange();
}
}
} else if (VDecl->isStaticDataMember() && !VDecl->isInline() &&
VDecl->getLexicalDeclContext()->isRecord()) {
// This is an in-class initialization for a static data member, e.g.,
//
// struct S {
// static const int value = 17;
// };
// C++ [class.mem]p4:
// A member-declarator can contain a constant-initializer only
// if it declares a static member (9.4) of const integral or
// const enumeration type, see 9.4.2.
//
// C++11 [class.static.data]p3:
// If a non-volatile non-inline const static data member is of integral
// or enumeration type, its declaration in the class definition can
// specify a brace-or-equal-initializer in which every initializer-clause
// that is an assignment-expression is a constant expression. A static
// data member of literal type can be declared in the class definition
// with the constexpr specifier; if so, its declaration shall specify a
// brace-or-equal-initializer in which every initializer-clause that is
// an assignment-expression is a constant expression.
// Do nothing on dependent types.
if (DclT->isDependentType()) {
// Allow any 'static constexpr' members, whether or not they are of literal
// type. We separately check that every constexpr variable is of literal
// type.
} else if (VDecl->isConstexpr()) {
// Require constness.
} else if (!DclT.isConstQualified()) {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_non_const)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
// We allow integer constant expressions in all cases.
} else if (DclT->isIntegralOrEnumerationType()) {
// Check whether the expression is a constant expression.
SourceLocation Loc;
if (getLangOpts().CPlusPlus11 && DclT.isVolatileQualified())
// In C++11, a non-constexpr const static data member with an
// in-class initializer cannot be volatile.
Diag(VDecl->getLocation(), diag::err_in_class_initializer_volatile);
else if (Init->isValueDependent())
; // Nothing to check.
else if (Init->isIntegerConstantExpr(Context, &Loc))
; // Ok, it's an ICE!
else if (Init->getType()->isScopedEnumeralType() &&
Init->isCXX11ConstantExpr(Context))
; // Ok, it is a scoped-enum constant expression.
else if (Init->isEvaluatable(Context)) {
// If we can constant fold the initializer through heroics, accept it,
// but report this as a use of an extension for -pedantic.
Diag(Loc, diag::ext_in_class_initializer_non_constant)
<< Init->getSourceRange();
} else {
// Otherwise, this is some crazy unknown case. Report the issue at the
// location provided by the failed isIntegerConstantExpr check.
Diag(Loc, diag::err_in_class_initializer_non_constant)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
}
// We allow foldable floating-point constants as an extension.
} else if (DclT->isFloatingType()) { // also permits complex, which is ok
// In C++98, this is a GNU extension. In C++11, it is not, but we support
// it anyway and provide a fixit to add the 'constexpr'.
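// e.g. (illustrative) "struct S { static const double d = 1.0; };" is
// accepted in C++11 with a fixit suggesting the addition of 'constexpr'.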
if (getLangOpts().CPlusPlus11) {
Diag(VDecl->getLocation(),
diag::ext_in_class_initializer_float_type_cxx11)
<< DclT << Init->getSourceRange();
Diag(VDecl->getLocStart(),
diag::note_in_class_initializer_float_type_cxx11)
<< FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
} else {
Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type)
<< DclT << Init->getSourceRange();
if (!Init->isValueDependent() && !Init->isEvaluatable(Context)) {
Diag(Init->getExprLoc(), diag::err_in_class_initializer_non_constant)
<< Init->getSourceRange();
VDecl->setInvalidDecl();
}
}
// Suggest adding 'constexpr' in C++11 for literal types.
} else if (getLangOpts().CPlusPlus11 && DclT->isLiteralType(Context)) {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_literal_type)
<< DclT << Init->getSourceRange()
<< FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
VDecl->setConstexpr(true);
} else {
Diag(VDecl->getLocation(), diag::err_in_class_initializer_bad_type)
<< DclT << Init->getSourceRange();
VDecl->setInvalidDecl();
}
} else if (VDecl->isFileVarDecl()) {
// In C, extern is typically used to avoid tentative definitions when
// declaring variables in headers, but adding an initializer makes it a
// definition. This is somewhat confusing, so GCC and Clang both warn on it.
// In C++, extern is often used to give implicitly static const variables
// external linkage, so don't warn in that case. If selectany is present,
// this might be header code intended for C and C++ inclusion, so apply the
// C++ rules.
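// e.g. at file scope in C, "extern int n = 1;" triggers this warning.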
if (VDecl->getStorageClass() == SC_Extern &&
((!getLangOpts().CPlusPlus && !VDecl->hasAttr<SelectAnyAttr>()) ||
!Context.getBaseElementType(VDecl->getType()).isConstQualified()) &&
!(getLangOpts().CPlusPlus && VDecl->isExternC()) &&
!isTemplateInstantiation(VDecl->getTemplateSpecializationKind()))
Diag(VDecl->getLocation(), diag::warn_extern_init);
// C99 6.7.8p4. All file-scoped initializers need to be constant.
if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl())
CheckForConstantInitializer(Init, DclT);
}
// We will represent direct-initialization similarly to copy-initialization:
// int x(1); -as-> int x = 1;
// ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
//
// Clients that want to distinguish between the two forms can check for
// a direct initializer using VarDecl::getInitStyle().
// A major benefit is that clients that don't particularly care which
// exact form it was (like CodeGen) can handle both cases without
// special-case code.
// C++ 8.5p11:
// The form of initialization (using parentheses or '=') is generally
// insignificant, but does matter when the entity being initialized has a
// class type.
if (CXXDirectInit) {
assert(DirectInit && "Call-style initializer must be direct init.");
VDecl->setInitStyle(VarDecl::CallInit);
} else if (DirectInit) {
// This must be list-initialization. No other way is direct-initialization.
VDecl->setInitStyle(VarDecl::ListInit);
}
CheckCompleteVariableDeclaration(VDecl);
}
/// ActOnInitializerError - Given that there was an error parsing an
/// initializer for the given declaration, try to return to some form
/// of sanity.
void Sema::ActOnInitializerError(Decl *D) {
// Our main concern here is re-establishing invariants like "a
// variable's type is either dependent or complete".
if (!D || D->isInvalidDecl()) return;
VarDecl *VD = dyn_cast<VarDecl>(D);
if (!VD) return;
// Bindings are not usable if we can't make sense of the initializer.
if (auto *DD = dyn_cast<DecompositionDecl>(D))
for (auto *BD : DD->bindings())
BD->setInvalidDecl();
// Auto types are meaningless if we can't make sense of the initializer.
if (ParsingInitForAutoVars.count(D)) {
D->setInvalidDecl();
return;
}
QualType Ty = VD->getType();
if (Ty->isDependentType()) return;
// Require a complete type.
if (RequireCompleteType(VD->getLocation(),
Context.getBaseElementType(Ty),
diag::err_typecheck_decl_incomplete_type)) {
VD->setInvalidDecl();
return;
}
// Require a non-abstract type.
if (RequireNonAbstractType(VD->getLocation(), Ty,
diag::err_abstract_type_in_decl,
AbstractVariableType)) {
VD->setInvalidDecl();
return;
}
// Don't bother complaining about constructors or destructors,
// though.
}
void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
// If there is no declaration, there was an error parsing it. Just ignore it.
if (!RealDecl)
return;
if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) {
QualType Type = Var->getType();
// C++1z [dcl.dcl]p1 grammar implies that an initializer is mandatory.
if (isa<DecompositionDecl>(RealDecl)) {
Diag(Var->getLocation(), diag::err_decomp_decl_requires_init) << Var;
Var->setInvalidDecl();
return;
}
if (Type->isUndeducedType() &&
DeduceVariableDeclarationType(Var, false, nullptr))
return;
// C++11 [class.static.data]p3: A static data member can be declared with
// the constexpr specifier; if so, its declaration shall specify
// a brace-or-equal-initializer.
// C++11 [dcl.constexpr]p1: The constexpr specifier shall be applied only to
// the definition of a variable [...] or the declaration of a static data
// member.
if (Var->isConstexpr() && !Var->isThisDeclarationADefinition() &&
!Var->isThisDeclarationADemotedDefinition()) {
if (Var->isStaticDataMember()) {
// C++1z removes the relevant rule; the in-class declaration is always
// a definition there.
if (!getLangOpts().CPlusPlus17) {
Diag(Var->getLocation(),
diag::err_constexpr_static_mem_var_requires_init)
<< Var->getDeclName();
Var->setInvalidDecl();
return;
}
} else {
Diag(Var->getLocation(), diag::err_invalid_constexpr_var_decl);
Var->setInvalidDecl();
return;
}
}
// OpenCL v1.1 s6.5.3: variables declared in the constant address space must
// be initialized.
if (!Var->isInvalidDecl() &&
Var->getType().getAddressSpace() == LangAS::opencl_constant &&
Var->getStorageClass() != SC_Extern && !Var->getInit()) {
Diag(Var->getLocation(), diag::err_opencl_constant_no_init);
Var->setInvalidDecl();
return;
}
switch (Var->isThisDeclarationADefinition()) {
case VarDecl::Definition:
if (!Var->isStaticDataMember() || !Var->getAnyInitializer())
break;
// We have an out-of-line definition of a static data member
// that has an in-class initializer, so we type-check this like
// a declaration.
//
LLVM_FALLTHROUGH;
case VarDecl::DeclarationOnly:
// It's only a declaration.
// Block scope. C99 6.7p7: If an identifier for an object is
// declared with no linkage (C99 6.2.2p6), the type for the
// object shall be complete.
if (!Type->isDependentType() && Var->isLocalVarDecl() &&
!Var->hasLinkage() && !Var->isInvalidDecl() &&
RequireCompleteType(Var->getLocation(), Type,
diag::err_typecheck_decl_incomplete_type))
Var->setInvalidDecl();
// Make sure that the type is not abstract.
if (!Type->isDependentType() && !Var->isInvalidDecl() &&
RequireNonAbstractType(Var->getLocation(), Type,
diag::err_abstract_type_in_decl,
AbstractVariableType))
Var->setInvalidDecl();
if (!Type->isDependentType() && !Var->isInvalidDecl() &&
Var->getStorageClass() == SC_PrivateExtern) {
Diag(Var->getLocation(), diag::warn_private_extern);
Diag(Var->getLocation(), diag::note_private_extern);
}
return;
case VarDecl::TentativeDefinition:
// File scope. C99 6.9.2p2: A declaration of an identifier for an
// object that has file scope without an initializer, and without a
// storage-class specifier or with the storage-class specifier "static",
// constitutes a tentative definition. Note: A tentative definition with
// external linkage is valid (C99 6.2.2p5).
if (!Var->isInvalidDecl()) {
if (const IncompleteArrayType *ArrayT
= Context.getAsIncompleteArrayType(Type)) {
if (RequireCompleteType(Var->getLocation(),
ArrayT->getElementType(),
diag::err_illegal_decl_array_incomplete_type))
Var->setInvalidDecl();
} else if (Var->getStorageClass() == SC_Static) {
// C99 6.9.2p3: If the declaration of an identifier for an object is
// a tentative definition and has internal linkage (C99 6.2.2p3), the
// declared type shall not be an incomplete type.
// NOTE: code such as the following
// static struct s;
// struct s { int a; };
// is accepted by gcc. Hence here we issue a warning instead of
// an error and we do not invalidate the static declaration.
// NOTE: to avoid multiple warnings, only check the first declaration.
if (Var->isFirstDecl())
RequireCompleteType(Var->getLocation(), Type,
diag::ext_typecheck_decl_incomplete_type);
}
}
// Record the tentative definition; we're done.
if (!Var->isInvalidDecl())
TentativeDefinitions.push_back(Var);
return;
}
// Provide a specific diagnostic for uninitialized variable
// definitions with incomplete array type.
if (Type->isIncompleteArrayType()) {
Diag(Var->getLocation(),
diag::err_typecheck_incomplete_array_needs_initializer);
Var->setInvalidDecl();
return;
}
// Provide a specific diagnostic for uninitialized variable
// definitions with reference type.
if (Type->isReferenceType()) {
Diag(Var->getLocation(), diag::err_reference_var_requires_init)
<< Var->getDeclName()
<< SourceRange(Var->getLocation(), Var->getLocation());
Var->setInvalidDecl();
return;
}
// Do not attempt to type-check the default initializer for a
// variable with dependent type.
if (Type->isDependentType())
return;
if (Var->isInvalidDecl())
return;
if (!Var->hasAttr<AliasAttr>()) {
if (RequireCompleteType(Var->getLocation(),
Context.getBaseElementType(Type),
diag::err_typecheck_decl_incomplete_type)) {
Var->setInvalidDecl();
return;
}
} else {
return;
}
// The variable cannot have an abstract class type.
if (RequireNonAbstractType(Var->getLocation(), Type,
diag::err_abstract_type_in_decl,
AbstractVariableType)) {
Var->setInvalidDecl();
return;
}
// Check for jumps past the implicit initializer. C++0x
// clarifies that this applies to a "variable with automatic
// storage duration", not a "local variable".
// C++11 [stmt.dcl]p3
// A program that jumps from a point where a variable with automatic
// storage duration is not in scope to a point where it is in scope is
// ill-formed unless the variable has scalar type, class type with a
// trivial default constructor and a trivial destructor, a cv-qualified
// version of one of these types, or an array of one of the preceding
// types and is declared without an initializer.
if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) {
if (const RecordType *Record
= Context.getBaseElementType(Type)->getAs<RecordType>()) {
CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
// Mark the function (if we're in one) for further checking even if the
// looser rules of C++11 do not require such checks, so that we can
// diagnose incompatibilities with C++98.
if (!CXXRecord->isPOD())
setFunctionHasBranchProtectedScope();
}
}
// C++03 [dcl.init]p9:
// If no initializer is specified for an object, and the
// object is of (possibly cv-qualified) non-POD class type (or
// array thereof), the object shall be default-initialized; if
// the object is of const-qualified type, the underlying class
// type shall have a user-declared default
// constructor. Otherwise, if no initializer is specified for
// a non-static object, the object and its subobjects, if
// any, have an indeterminate initial value; if the object
// or any of its subobjects are of const-qualified type, the
// program is ill-formed.
// C++0x [dcl.init]p11:
// If no initializer is specified for an object, the object is
// default-initialized; [...].
InitializedEntity Entity = InitializedEntity::InitializeVariable(Var);
InitializationKind Kind
= InitializationKind::CreateDefault(Var->getLocation());
InitializationSequence InitSeq(*this, Entity, Kind, None);
ExprResult Init = InitSeq.Perform(*this, Entity, Kind, None);
if (Init.isInvalid())
Var->setInvalidDecl();
else if (Init.get()) {
Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
// This is important for template substitution.
Var->setInitStyle(VarDecl::CallInit);
}
CheckCompleteVariableDeclaration(Var);
}
}
void Sema::ActOnCXXForRangeDecl(Decl *D) {
// If there is no declaration, there was an error parsing it. Ignore it.
if (!D)
return;
VarDecl *VD = dyn_cast<VarDecl>(D);
if (!VD) {
Diag(D->getLocation(), diag::err_for_range_decl_must_be_var);
D->setInvalidDecl();
return;
}
VD->setCXXForRangeDecl(true);
// for-range-declaration cannot be given a storage class specifier.
int Error = -1;
switch (VD->getStorageClass()) {
case SC_None:
break;
case SC_Extern:
Error = 0;
break;
case SC_Static:
Error = 1;
break;
case SC_PrivateExtern:
Error = 2;
break;
case SC_Auto:
Error = 3;
break;
case SC_Register:
Error = 4;
break;
}
if (Error != -1) {
Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class)
<< VD->getDeclName() << Error;
D->setInvalidDecl();
}
}
StmtResult
Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd) {
// C++1y [stmt.iter]p1:
// A range-based for statement of the form
// for ( for-range-identifier : for-range-initializer ) statement
// is equivalent to
// for ( auto&& for-range-identifier : for-range-initializer ) statement
DeclSpec DS(Attrs.getPool().getFactory());
const char *PrevSpec;
unsigned DiagID;
DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID,
getPrintingPolicy());
Declarator D(DS, DeclaratorContext::ForContext);
D.SetIdentifier(Ident, IdentLoc);
D.takeAttributes(Attrs, AttrEnd);
ParsedAttributes EmptyAttrs(Attrs.getPool().getFactory());
D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/ false),
IdentLoc);
Decl *Var = ActOnDeclarator(S, D);
cast<VarDecl>(Var)->setCXXForRangeDecl(true);
FinalizeDeclaration(Var);
return ActOnDeclStmt(FinalizeDeclaratorGroup(S, DS, Var), IdentLoc,
AttrEnd.isValid() ? AttrEnd : IdentLoc);
}
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isInvalidDecl()) return;
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.12.5 - Every block variable declaration must have an
// initializer.
if (var->getTypeSourceInfo()->getType()->isBlockPointerType() &&
!var->hasInit()) {
Diag(var->getLocation(), diag::err_opencl_invalid_block_declaration)
<< 1 /*Init*/;
var->setInvalidDecl();
return;
}
}
// In Objective-C, don't allow jumps past the implicit initialization of a
// local retaining variable.
if (getLangOpts().ObjC1 &&
var->hasLocalStorage()) {
switch (var->getType().getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
break;
case Qualifiers::OCL_Weak:
case Qualifiers::OCL_Strong:
setFunctionHasBranchProtectedScope();
break;
}
}
if (var->hasLocalStorage() &&
var->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
setFunctionHasBranchProtectedScope();
// Warn about externally-visible variables being defined without a
// prior declaration. We only want to do this for global
// declarations, but we also specifically need to avoid doing it for
// class members because the linkage of an anonymous class can
// change if it's later given a typedef name.
if (var->isThisDeclarationADefinition() &&
var->getDeclContext()->getRedeclContext()->isFileContext() &&
var->isExternallyVisible() && var->hasLinkage() &&
!var->isInline() && !var->getDescribedVarTemplate() &&
!isTemplateInstantiation(var->getTemplateSpecializationKind()) &&
!getDiagnostics().isIgnored(diag::warn_missing_variable_declarations,
var->getLocation())) {
// Find a previous declaration that's not a definition.
VarDecl *prev = var->getPreviousDecl();
while (prev && prev->isThisDeclarationADefinition())
prev = prev->getPreviousDecl();
if (!prev)
Diag(var->getLocation(), diag::warn_missing_variable_declarations) << var;
}
// Cache the result of checking for constant initialization.
Optional<bool> CacheHasConstInit;
const Expr *CacheCulprit;
auto checkConstInit = [&]() mutable {
if (!CacheHasConstInit)
CacheHasConstInit = var->getInit()->isConstantInitializer(
Context, var->getType()->isReferenceType(), &CacheCulprit);
return *CacheHasConstInit;
};
if (var->getTLSKind() == VarDecl::TLS_Static) {
if (var->getType().isDestructedType()) {
// GNU C++98 edits for __thread, [basic.start.term]p3:
// The type of an object with thread storage duration shall not
// have a non-trivial destructor.
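// e.g. (illustrative) "__thread std::string s;" is rejected; in C++11 a
// note suggests using 'thread_local' instead.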
Diag(var->getLocation(), diag::err_thread_nontrivial_dtor);
if (getLangOpts().CPlusPlus11)
Diag(var->getLocation(), diag::note_use_thread_local);
} else if (getLangOpts().CPlusPlus && var->hasInit()) {
if (!checkConstInit()) {
// GNU C++98 edits for __thread, [basic.start.init]p4:
// An object of thread storage duration shall not require dynamic
// initialization.
// FIXME: Need strict checking here.
Diag(CacheCulprit->getExprLoc(), diag::err_thread_dynamic_init)
<< CacheCulprit->getSourceRange();
if (getLangOpts().CPlusPlus11)
Diag(var->getLocation(), diag::note_use_thread_local);
}
}
}
// Apply section attributes and pragmas to global variables.
bool GlobalStorage = var->hasGlobalStorage();
if (GlobalStorage && var->isThisDeclarationADefinition() &&
!inTemplateInstantiation()) {
PragmaStack<StringLiteral *> *Stack = nullptr;
int SectionFlags = ASTContext::PSF_Implicit | ASTContext::PSF_Read;
if (var->getType().isConstQualified())
Stack = &ConstSegStack;
else if (!var->getInit()) {
Stack = &BSSSegStack;
SectionFlags |= ASTContext::PSF_Write;
} else {
Stack = &DataSegStack;
SectionFlags |= ASTContext::PSF_Write;
}
if (Stack->CurrentValue && !var->hasAttr<SectionAttr>()) {
var->addAttr(SectionAttr::CreateImplicit(
Context, SectionAttr::Declspec_allocate,
Stack->CurrentValue->getString(), Stack->CurrentPragmaLocation));
}
if (const SectionAttr *SA = var->getAttr<SectionAttr>())
if (UnifySection(SA->getName(), SectionFlags, var))
var->dropAttr<SectionAttr>();
// Apply the init_seg attribute if this has an initializer. If the
// initializer turns out to not be dynamic, we'll end up ignoring this
// attribute.
if (CurInitSeg && var->getInit())
var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(),
CurInitSegLoc));
}
// All the following checks are C++ only.
if (!getLangOpts().CPlusPlus) {
// If this variable must be emitted, add it as an initializer for the
// current module.
if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, var);
return;
}
if (auto *DD = dyn_cast<DecompositionDecl>(var))
CheckCompleteDecompositionDeclaration(DD);
QualType type = var->getType();
if (type->isDependentType()) return;
// __block variables might require us to capture a copy-initializer.
if (var->hasAttr<BlocksAttr>()) {
// It's currently invalid to ever have a __block variable with an
// array type; should we diagnose that here?
// Regardless, we don't want to ignore array nesting when
// constructing this copy.
if (type->isStructureOrClassType()) {
EnterExpressionEvaluationContext scope(
*this, ExpressionEvaluationContext::PotentiallyEvaluated);
SourceLocation poi = var->getLocation();
Expr *varRef = new (Context) DeclRefExpr(var, false, type, VK_LValue, poi);
ExprResult result
= PerformMoveOrCopyInitialization(
InitializedEntity::InitializeBlock(poi, type, false),
var, var->getType(), varRef, /*AllowNRVO=*/true);
if (!result.isInvalid()) {
result = MaybeCreateExprWithCleanups(result);
Expr *init = result.getAs<Expr>();
Context.setBlockVarCopyInits(var, init);
}
}
}
Expr *Init = var->getInit();
bool IsGlobal = GlobalStorage && !var->isStaticLocal();
QualType baseType = Context.getBaseElementType(type);
if (Init && !Init->isValueDependent()) {
if (var->isConstexpr()) {
SmallVector<PartialDiagnosticAt, 8> Notes;
if (!var->evaluateValue(Notes) || !var->isInitICE()) {
SourceLocation DiagLoc = var->getLocation();
// If the note doesn't add any useful information other than a source
// location, fold it into the primary diagnostic.
if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
diag::note_invalid_subexpr_in_const_expr) {
DiagLoc = Notes[0].first;
Notes.clear();
}
Diag(DiagLoc, diag::err_constexpr_var_requires_const_init)
<< var << Init->getSourceRange();
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
Diag(Notes[I].first, Notes[I].second);
}
} else if (var->isUsableInConstantExpressions(Context)) {
// Check whether the initializer of a const variable of integral or
// enumeration type is an ICE now, since we can't tell whether it was
// initialized by a constant expression if we check later.
var->checkInitIsICE();
}
// Don't emit further diagnostics about constexpr globals since they
// were just diagnosed.
if (!var->isConstexpr() && GlobalStorage &&
var->hasAttr<RequireConstantInitAttr>()) {
// FIXME: Need strict checking in C++03 here.
bool DiagErr = getLangOpts().CPlusPlus11
? !var->checkInitIsICE() : !checkConstInit();
if (DiagErr) {
auto attr = var->getAttr<RequireConstantInitAttr>();
Diag(var->getLocation(), diag::err_require_constant_init_failed)
<< Init->getSourceRange();
Diag(attr->getLocation(), diag::note_declared_required_constant_init_here)
<< attr->getRange();
if (getLangOpts().CPlusPlus11) {
APValue Value;
SmallVector<PartialDiagnosticAt, 8> Notes;
Init->EvaluateAsInitializer(Value, getASTContext(), var, Notes);
for (auto &it : Notes)
Diag(it.first, it.second);
} else {
Diag(CacheCulprit->getExprLoc(),
diag::note_invalid_subexpr_in_const_expr)
<< CacheCulprit->getSourceRange();
}
}
}
else if (!var->isConstexpr() && IsGlobal &&
!getDiagnostics().isIgnored(diag::warn_global_constructor,
var->getLocation())) {
// Warn about globals which don't have a constant initializer. Don't
// warn about globals with a non-trivial destructor because we already
// warned about them.
CXXRecordDecl *RD = baseType->getAsCXXRecordDecl();
if (!(RD && !RD->hasTrivialDestructor())) {
if (!checkConstInit())
Diag(var->getLocation(), diag::warn_global_constructor)
<< Init->getSourceRange();
}
}
}
// Require the destructor.
if (const RecordType *recordType = baseType->getAs<RecordType>())
FinalizeVarWithDestructor(var, recordType);
// If this variable must be emitted, add it as an initializer for the current
// module.
if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, var);
}
/// Determines if a variable's alignment is dependent.
static bool hasDependentAlignment(VarDecl *VD) {
if (VD->getType()->isDependentType())
return true;
for (auto *I : VD->specific_attrs<AlignedAttr>())
if (I->isAlignmentDependent())
return true;
return false;
}
/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
/// any semantic actions necessary after any initializer has been attached.
void Sema::FinalizeDeclaration(Decl *ThisDecl) {
// Note that we are no longer parsing the initializer for this declaration.
ParsingInitForAutoVars.erase(ThisDecl);
VarDecl *VD = dyn_cast_or_null<VarDecl>(ThisDecl);
if (!VD)
return;
// Apply an implicit SectionAttr if '#pragma clang section bss|data|rodata' is active
if (VD->hasGlobalStorage() && VD->isThisDeclarationADefinition() &&
!inTemplateInstantiation() && !VD->hasAttr<SectionAttr>()) {
if (PragmaClangBSSSection.Valid)
VD->addAttr(PragmaClangBSSSectionAttr::CreateImplicit(Context,
PragmaClangBSSSection.SectionName,
PragmaClangBSSSection.PragmaLocation));
if (PragmaClangDataSection.Valid)
VD->addAttr(PragmaClangDataSectionAttr::CreateImplicit(Context,
PragmaClangDataSection.SectionName,
PragmaClangDataSection.PragmaLocation));
if (PragmaClangRodataSection.Valid)
VD->addAttr(PragmaClangRodataSectionAttr::CreateImplicit(Context,
PragmaClangRodataSection.SectionName,
PragmaClangRodataSection.PragmaLocation));
}
if (auto *DD = dyn_cast<DecompositionDecl>(ThisDecl)) {
for (auto *BD : DD->bindings()) {
FinalizeDeclaration(BD);
}
}
checkAttributesAfterMerging(*this, *VD);
// Perform the TLS alignment check here, after attributes that may affect
// the alignment have been attached to the variable and processed. Only
// perform the check if the target has a maximum TLS alignment (zero means
// no constraints).
if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) {
// Protect the check so that it's not performed on dependent types and
// dependent alignments (we can't determine the alignment in that case).
if (VD->getTLSKind() && !hasDependentAlignment(VD) &&
!VD->isInvalidDecl()) {
CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign);
if (Context.getDeclAlign(VD) > MaxAlignChars) {
Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
<< (unsigned)Context.getDeclAlign(VD).getQuantity() << VD
<< (unsigned)MaxAlignChars.getQuantity();
}
}
}
if (VD->isStaticLocal()) {
if (FunctionDecl *FD =
dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod())) {
// Static locals inherit dll attributes from their function.
if (Attr *A = getDLLAttr(FD)) {
auto *NewAttr = cast<InheritableAttr>(A->clone(getASTContext()));
NewAttr->setInherited(true);
VD->addAttr(NewAttr);
}
// CUDA 8.0 E.3.9.4: Within the body of a __device__ or __global__
// function, only __shared__ variables or variables without any device
// memory qualifiers may be declared with static storage class.
// Note: It is unclear how a function-scope non-const static variable
// without a device memory qualifier is implemented; therefore, only a
// static const variable without a device memory qualifier is allowed.
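// e.g. (illustrative) in a __device__ function, "static int n;" is
// diagnosed, while "static const int n = 1;" or a __shared__ variable is
// accepted.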
[&]() {
if (!getLangOpts().CUDA)
return;
if (VD->hasAttr<CUDASharedAttr>())
return;
if (VD->getType().isConstQualified() &&
!(VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
return;
if (CUDADiagIfDeviceCode(VD->getLocation(),
diag::err_device_static_local_var)
<< CurrentCUDATarget())
VD->setInvalidDecl();
}();
}
}
// Perform check for initializers of device-side global variables.
// CUDA allows empty constructors as initializers (see E.2.3.1, CUDA
// 7.5). We must also apply the same checks to all __shared__
// variables whether they are local or not. CUDA also allows
// constant initializers for __constant__ and __device__ variables.
if (getLangOpts().CUDA)
checkAllowedCUDAInitializer(VD);
// Grab the dllimport or dllexport attribute off of the VarDecl.
const InheritableAttr *DLLAttr = getDLLAttr(VD);
// Imported static data members cannot be defined out-of-line.
if (const auto *IA = dyn_cast_or_null<DLLImportAttr>(DLLAttr)) {
if (VD->isStaticDataMember() && VD->isOutOfLine() &&
VD->isThisDeclarationADefinition()) {
// We allow definitions of dllimport class template static data members
// with a warning.
CXXRecordDecl *Context =
cast<CXXRecordDecl>(VD->getFirstDecl()->getDeclContext());
bool IsClassTemplateMember =
isa<ClassTemplatePartialSpecializationDecl>(Context) ||
Context->getDescribedClassTemplate();
Diag(VD->getLocation(),
IsClassTemplateMember
? diag::warn_attribute_dllimport_static_field_definition
: diag::err_attribute_dllimport_static_field_definition);
Diag(IA->getLocation(), diag::note_attribute);
if (!IsClassTemplateMember)
VD->setInvalidDecl();
}
}
// dllimport/dllexport variables cannot be thread local, their TLS index
// isn't exported with the variable.
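// e.g. (illustrative) "__declspec(dllimport) thread_local int x;" at
// global scope is rejected.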
if (DLLAttr && VD->getTLSKind()) {
auto *F = dyn_cast_or_null<FunctionDecl>(VD->getParentFunctionOrMethod());
if (F && getDLLAttr(F)) {
assert(VD->isStaticLocal());
// But if this is a static local in a dllimport/dllexport function, the
// function will never be inlined, which means the var would never be
// imported, so having it marked import/export is safe.
} else {
Diag(VD->getLocation(), diag::err_attribute_dll_thread_local) << VD
<< DLLAttr;
VD->setInvalidDecl();
}
}
if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) {
if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) {
Diag(Attr->getLocation(), diag::warn_attribute_ignored) << Attr;
VD->dropAttr<UsedAttr>();
}
}
const DeclContext *DC = VD->getDeclContext();
// If there's a #pragma GCC visibility in scope, and this isn't a class
// member, set the visibility of this variable.
if (DC->getRedeclContext()->isFileContext() && VD->isExternallyVisible())
AddPushedVisibilityAttribute(VD);
// FIXME: Warn on unused var template partial specializations.
if (VD->isFileVarDecl() && !isa<VarTemplatePartialSpecializationDecl>(VD))
MarkUnusedFileScopedDecl(VD);
// Now we have parsed the initializer and can update the table of magic
// tag values.
if (!VD->hasAttr<TypeTagForDatatypeAttr>() ||
!VD->getType()->isIntegralOrEnumerationType())
return;
for (const auto *I : ThisDecl->specific_attrs<TypeTagForDatatypeAttr>()) {
const Expr *MagicValueExpr = VD->getInit();
if (!MagicValueExpr) {
continue;
}
llvm::APSInt MagicValueInt;
if (!MagicValueExpr->isIntegerConstantExpr(MagicValueInt, Context)) {
Diag(I->getRange().getBegin(),
diag::err_type_tag_for_datatype_not_ice)
<< LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
continue;
}
if (MagicValueInt.getActiveBits() > 64) {
Diag(I->getRange().getBegin(),
diag::err_type_tag_for_datatype_too_large)
<< LangOpts.CPlusPlus << MagicValueExpr->getSourceRange();
continue;
}
uint64_t MagicValue = MagicValueInt.getZExtValue();
RegisterTypeTagForDatatype(I->getArgumentKind(),
MagicValue,
I->getMatchingCType(),
I->getLayoutCompatible(),
I->getMustBeNull());
}
}
static bool hasDeducedAuto(DeclaratorDecl *DD) {
auto *VD = dyn_cast<VarDecl>(DD);
return VD && !VD->getType()->hasAutoForTrailingReturnType();
}
Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group) {
SmallVector<Decl*, 8> Decls;
if (DS.isTypeSpecOwned())
Decls.push_back(DS.getRepAsDecl());
DeclaratorDecl *FirstDeclaratorInGroup = nullptr;
DecompositionDecl *FirstDecompDeclaratorInGroup = nullptr;
bool DiagnosedMultipleDecomps = false;
DeclaratorDecl *FirstNonDeducedAutoInGroup = nullptr;
bool DiagnosedNonDeducedAuto = false;
for (unsigned i = 0, e = Group.size(); i != e; ++i) {
if (Decl *D = Group[i]) {
// For declarators, there are some additional syntactic-ish checks we need
// to perform.
if (auto *DD = dyn_cast<DeclaratorDecl>(D)) {
if (!FirstDeclaratorInGroup)
FirstDeclaratorInGroup = DD;
if (!FirstDecompDeclaratorInGroup)
FirstDecompDeclaratorInGroup = dyn_cast<DecompositionDecl>(D);
if (!FirstNonDeducedAutoInGroup && DS.hasAutoTypeSpec() &&
!hasDeducedAuto(DD))
FirstNonDeducedAutoInGroup = DD;
if (FirstDeclaratorInGroup != DD) {
// A decomposition declaration cannot be combined with any other
// declaration in the same group.
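// Illustrative example (not from the original source):
//   auto [X, Y] = Pair, Z = 0; // error: decomposition must be alone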
if (FirstDecompDeclaratorInGroup && !DiagnosedMultipleDecomps) {
Diag(FirstDecompDeclaratorInGroup->getLocation(),
diag::err_decomp_decl_not_alone)
<< FirstDeclaratorInGroup->getSourceRange()
<< DD->getSourceRange();
DiagnosedMultipleDecomps = true;
}
// A declarator that uses 'auto' in any way other than to declare a
// variable with a deduced type cannot be combined with any other
// declarator in the same group.
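// Illustrative example (not from the original source):
//   auto F() -> int, I = 0; // error: trailing return type must be alone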
if (FirstNonDeducedAutoInGroup && !DiagnosedNonDeducedAuto) {
Diag(FirstNonDeducedAutoInGroup->getLocation(),
diag::err_auto_non_deduced_not_alone)
<< FirstNonDeducedAutoInGroup->getType()
->hasAutoForTrailingReturnType()
<< FirstDeclaratorInGroup->getSourceRange()
<< DD->getSourceRange();
DiagnosedNonDeducedAuto = true;
}
}
}
Decls.push_back(D);
}
}
if (DeclSpec::isDeclRep(DS.getTypeSpecType())) {
if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) {
handleTagNumbering(Tag, S);
if (FirstDeclaratorInGroup && !Tag->hasNameForLinkage() &&
getLangOpts().CPlusPlus)
Context.addDeclaratorForUnnamedTagDecl(Tag, FirstDeclaratorInGroup);
}
}
return BuildDeclaratorGroup(Decls);
}
/// BuildDeclaratorGroup - convert a list of declarations into a declaration
/// group, performing any necessary semantic checking.
Sema::DeclGroupPtrTy
Sema::BuildDeclaratorGroup(MutableArrayRef<Decl *> Group) {
// C++14 [dcl.spec.auto]p7: (DR1347)
// If the type that replaces the placeholder type is not the same in each
// deduction, the program is ill-formed.
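// Illustrative example (not from the original source):
//   auto A = 1, B = 2.5; // error: deduced as 'int' for A, 'double' for B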
if (Group.size() > 1) {
QualType Deduced;
VarDecl *DeducedDecl = nullptr;
for (unsigned i = 0, e = Group.size(); i != e; ++i) {
VarDecl *D = dyn_cast<VarDecl>(Group[i]);
if (!D || D->isInvalidDecl())
break;
DeducedType *DT = D->getType()->getContainedDeducedType();
if (!DT || DT->getDeducedType().isNull())
continue;
if (Deduced.isNull()) {
Deduced = DT->getDeducedType();
DeducedDecl = D;
} else if (!Context.hasSameType(DT->getDeducedType(), Deduced)) {
auto *AT = dyn_cast<AutoType>(DT);
Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
diag::err_auto_different_deductions)
<< (AT ? (unsigned)AT->getKeyword() : 3)
<< Deduced << DeducedDecl->getDeclName()
<< DT->getDeducedType() << D->getDeclName()
<< DeducedDecl->getInit()->getSourceRange()
<< D->getInit()->getSourceRange();
D->setInvalidDecl();
break;
}
}
}
ActOnDocumentableDecls(Group);
return DeclGroupPtrTy::make(
DeclGroupRef::Create(Context, Group.data(), Group.size()));
}
void Sema::ActOnDocumentableDecl(Decl *D) {
ActOnDocumentableDecls(D);
}
void Sema::ActOnDocumentableDecls(ArrayRef<Decl *> Group) {
// Don't parse the comment if Doxygen diagnostics are ignored.
if (Group.empty() || !Group[0])
return;
if (Diags.isIgnored(diag::warn_doc_param_not_found,
Group[0]->getLocation()) &&
Diags.isIgnored(diag::warn_unknown_comment_command_name,
Group[0]->getLocation()))
return;
if (Group.size() >= 2) {
// This is a decl group. Normally it will contain only declarations
// produced from declarator list. But in case we have any definitions or
// additional declaration references:
// 'typedef struct S {} S;'
// 'typedef struct S *S;'
// 'struct S *pS;'
// FinalizeDeclaratorGroup adds these as separate declarations.
Decl *MaybeTagDecl = Group[0];
if (MaybeTagDecl && isa<TagDecl>(MaybeTagDecl)) {
Group = Group.slice(1);
}
}
// See if there are any new comments that are not attached to a decl.
ArrayRef<RawComment *> Comments = Context.getRawCommentList().getComments();
if (!Comments.empty() &&
!Comments.back()->isAttached()) {
// There is at least one comment that is not attached to a decl.
// Maybe it should be attached to one of these decls?
//
// Note that this way we pick up not only comments that precede the
// declaration, but also comments that *follow* the declaration -- thanks to
// the lookahead in the lexer: we've consumed the semicolon and looked
// ahead through comments.
for (unsigned i = 0, e = Group.size(); i != e; ++i)
Context.getCommentForDecl(Group[i], &PP);
}
}
/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
/// to introduce parameters into function prototype scope.
Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
const DeclSpec &DS = D.getDeclSpec();
// Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
// C++03 [dcl.stc]p2 also permits 'auto'.
StorageClass SC = SC_None;
if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
SC = SC_Register;
// In C++11, the 'register' storage class specifier is deprecated.
// In C++17, it is not allowed, but we tolerate it as an extension.
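// Illustrative example (not from the original source):
//   void f(register int n); // warns in C++11/14, extension in C++17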
if (getLangOpts().CPlusPlus11) {
Diag(DS.getStorageClassSpecLoc(),
getLangOpts().CPlusPlus17 ? diag::ext_register_storage_class
: diag::warn_deprecated_register)
<< FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
}
} else if (getLangOpts().CPlusPlus &&
DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
SC = SC_Auto;
} else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
Diag(DS.getStorageClassSpecLoc(),
diag::err_invalid_storage_class_in_func_decl);
D.getMutableDeclSpec().ClearStorageClassSpecs();
}
if (DeclSpec::TSCS TSCS = DS.getThreadStorageClassSpec())
Diag(DS.getThreadStorageClassSpecLoc(), diag::err_invalid_thread)
<< DeclSpec::getSpecifierName(TSCS);
if (DS.isInlineSpecified())
Diag(DS.getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (DS.isConstexprSpecified())
Diag(DS.getConstexprSpecLoc(), diag::err_invalid_constexpr)
<< 0;
DiagnoseFunctionSpecifiers(DS);
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType parmDeclType = TInfo->getType();
if (getLangOpts().CPlusPlus) {
// Check that there are no default arguments inside the type of this
// parameter.
CheckExtraCXXDefaultArguments(D);
// Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
if (D.getCXXScopeSpec().isSet()) {
Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
<< D.getCXXScopeSpec().getRange();
D.getCXXScopeSpec().clear();
}
}
// Ensure we have a valid name
IdentifierInfo *II = nullptr;
if (D.hasName()) {
II = D.getIdentifier();
if (!II) {
Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
<< GetNameForDeclarator(D).getName();
D.setInvalidType(true);
}
}
// Check for redeclaration of parameters, e.g. int foo(int x, int x);
if (II) {
LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
ForVisibleRedeclaration);
LookupName(R, S);
if (R.isSingleResult()) {
NamedDecl *PrevDecl = R.getFoundDecl();
if (PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
} else if (S->isDeclScope(PrevDecl)) {
Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
// Recover by removing the name
II = nullptr;
D.SetIdentifier(nullptr, D.getIdentifierLoc());
D.setInvalidType(true);
}
}
}
// Temporarily put parameter variables in the translation unit, not
// the enclosing context. This prevents them from accidentally
// looking like class members in C++.
ParmVarDecl *New = CheckParameter(Context.getTranslationUnitDecl(),
D.getLocStart(),
D.getIdentifierLoc(), II,
parmDeclType, TInfo,
SC);
if (D.isInvalidType())
New->setInvalidDecl();
assert(S->isFunctionPrototypeScope());
assert(S->getFunctionPrototypeDepth() >= 1);
New->setScopeInfo(S->getFunctionPrototypeDepth() - 1,
S->getNextFunctionPrototypeIndex());
// Add the parameter declaration into this scope.
S->AddDecl(New);
if (II)
IdResolver.AddDecl(New);
ProcessDeclAttributes(S, New, D);
if (D.getDeclSpec().isModulePrivateSpecified())
Diag(New->getLocation(), diag::err_module_private_local)
<< 1 << New->getDeclName()
<< SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
if (New->hasAttr<BlocksAttr>()) {
Diag(New->getLocation(), diag::err_block_on_nonlocal);
}
return New;
}
/// Synthesizes a variable for a parameter arising from a
/// typedef.
ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T) {
/* FIXME: setting StartLoc == Loc.
Would it be worthwhile to modify callers so as to provide a proper source
location for the unnamed parameters, embedding the parameter's type? */
ParmVarDecl *Param = ParmVarDecl::Create(Context, DC, Loc, Loc, nullptr,
T, Context.getTrivialTypeSourceInfo(T, Loc),
SC_None, nullptr);
Param->setImplicit();
return Param;
}
void Sema::DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters) {
// Don't diagnose unused-parameter errors in template instantiations; we
// will already have done so in the template itself.
if (inTemplateInstantiation())
return;
for (const ParmVarDecl *Parameter : Parameters) {
if (!Parameter->isReferenced() && Parameter->getDeclName() &&
!Parameter->hasAttr<UnusedAttr>()) {
Diag(Parameter->getLocation(), diag::warn_unused_parameter)
<< Parameter->getDeclName();
}
}
}
void Sema::DiagnoseSizeOfParametersAndReturnValue(
ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D) {
if (LangOpts.NumLargeByValueCopy == 0) // No check.
return;
// Warn if the return value is pass-by-value and larger than the specified
// threshold.
if (!ReturnTy->isDependentType() && ReturnTy.isPODType(Context)) {
unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
Diag(D->getLocation(), diag::warn_return_value_size)
<< D->getDeclName() << Size;
}
// Warn if any parameter is pass-by-value and larger than the specified
// threshold.
for (const ParmVarDecl *Parameter : Parameters) {
QualType T = Parameter->getType();
if (T->isDependentType() || !T.isPODType(Context))
continue;
unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
if (Size > LangOpts.NumLargeByValueCopy)
Diag(Parameter->getLocation(), diag::warn_parameter_size)
<< Parameter->getDeclName() << Size;
}
}
ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC) {
// In ARC, infer a lifetime qualifier for appropriate parameter types.
if (getLangOpts().ObjCAutoRefCount &&
T.getObjCLifetime() == Qualifiers::OCL_None &&
T->isObjCLifetimeType()) {
Qualifiers::ObjCLifetime lifetime;
// Special cases for arrays:
// - if it's const, use __unsafe_unretained
// - otherwise, it's an error
if (T->isArrayType()) {
if (!T.isConstQualified()) {
DelayedDiagnostics.add(
sema::DelayedDiagnostic::makeForbiddenType(
NameLoc, diag::err_arc_array_param_no_ownership, T, false));
}
lifetime = Qualifiers::OCL_ExplicitNone;
} else {
lifetime = T->getObjCARCImplicitLifetime();
}
T = Context.getLifetimeQualifiedType(T, lifetime);
}
ParmVarDecl *New = ParmVarDecl::Create(Context, DC, StartLoc, NameLoc, Name,
Context.getAdjustedParameterType(T),
TSInfo, SC, nullptr);
// Parameters cannot be abstract class types.
// For record types, this is done by the AbstractClassUsageDiagnoser once
// the class has been completely parsed.
if (!CurContext->isRecord() &&
RequireNonAbstractType(NameLoc, T, diag::err_abstract_type_in_decl,
AbstractParamType))
New->setInvalidDecl();
// Parameter declarators cannot be interface types. All ObjC objects are
// passed by reference.
if (T->isObjCObjectType()) {
SourceLocation TypeEndLoc =
getLocForEndOfToken(TSInfo->getTypeLoc().getLocEnd());
Diag(NameLoc,
diag::err_object_cannot_be_passed_returned_by_value) << 1 << T
<< FixItHint::CreateInsertion(TypeEndLoc, "*");
T = Context.getObjCObjectPointerType(T);
New->setType(T);
}
// ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
// duration shall not be qualified by an address-space qualifier."
// Since all parameters have automatic storage duration, they cannot have
// an address space.
if (T.getAddressSpace() != LangAS::Default &&
// OpenCL allows function arguments declared to be an array of a type
// to be qualified with an address space.
!(getLangOpts().OpenCL &&
(T->isArrayType() || T.getAddressSpace() == LangAS::opencl_private))) {
Diag(NameLoc, diag::err_arg_with_address_space);
New->setInvalidDecl();
}
return New;
}
void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls) {
DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
// Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
// for a K&R function.
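// Illustrative example (not from the original source): in
//   int f(a, b) int a; { return a + b; }
// 'b' is never declared, so it is implicitly given type 'int' below.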
if (!FTI.hasPrototype) {
for (int i = FTI.NumParams; i != 0; /* decrement in loop */) {
--i;
if (FTI.Params[i].Param == nullptr) {
SmallString<256> Code;
llvm::raw_svector_ostream(Code)
<< " int " << FTI.Params[i].Ident->getName() << ";\n";
Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
<< FTI.Params[i].Ident
<< FixItHint::CreateInsertion(LocAfterDecls, Code);
// Implicitly declare the argument as type 'int' for lack of a better
// type.
AttributeFactory attrs;
DeclSpec DS(attrs);
const char* PrevSpec; // unused
unsigned DiagID; // unused
DS.SetTypeSpecType(DeclSpec::TST_int, FTI.Params[i].IdentLoc, PrevSpec,
DiagID, Context.getPrintingPolicy());
// Use the identifier location for the type source range.
DS.SetRangeStart(FTI.Params[i].IdentLoc);
DS.SetRangeEnd(FTI.Params[i].IdentLoc);
Declarator ParamD(DS, DeclaratorContext::KNRTypeListContext);
ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
}
}
}
}
Decl *
Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody) {
assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
D.setFunctionDefinitionKind(FDK_Definition);
Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
return ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
}
void Sema::ActOnFinishInlineFunctionDef(FunctionDecl *D) {
Consumer.HandleInlineFunctionDefinition(D);
}
static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
const FunctionDecl*& PossibleZeroParamPrototype) {
// Don't warn about invalid declarations.
if (FD->isInvalidDecl())
return false;
// Or declarations that aren't global.
if (!FD->isGlobal())
return false;
// Don't warn about C++ member functions.
if (isa<CXXMethodDecl>(FD))
return false;
// Don't warn about 'main'.
if (FD->isMain())
return false;
// Don't warn about inline functions.
if (FD->isInlined())
return false;
// Don't warn about function templates.
if (FD->getDescribedFunctionTemplate())
return false;
// Don't warn about function template specializations.
if (FD->isFunctionTemplateSpecialization())
return false;
// Don't warn for OpenCL kernels.
if (FD->hasAttr<OpenCLKernelAttr>())
return false;
// Don't warn on explicitly deleted functions.
if (FD->isDeleted())
return false;
bool MissingPrototype = true;
for (const FunctionDecl *Prev = FD->getPreviousDecl();
Prev; Prev = Prev->getPreviousDecl()) {
// Ignore any declarations that occur in function or method
// scope, because they aren't visible from the header.
if (Prev->getLexicalDeclContext()->isFunctionOrMethod())
continue;
MissingPrototype = !Prev->getType()->isFunctionProtoType();
if (FD->getNumParams() == 0)
PossibleZeroParamPrototype = Prev;
break;
}
return MissingPrototype;
}
void
Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
const FunctionDecl *EffectiveDefinition,
SkipBodyInfo *SkipBody) {
const FunctionDecl *Definition = EffectiveDefinition;
if (!Definition && !FD->isDefined(Definition) && !FD->isCXXClassMember()) {
// If this is a friend function defined in a class template, it does not
// have a body until it is used, nevertheless it is a definition, see
// [temp.inst]p2:
//
// ... for the purpose of determining whether an instantiated redeclaration
// is valid according to [basic.def.odr] and [class.mem], a declaration that
// corresponds to a definition in the template is considered to be a
// definition.
//
// The following code must produce redefinition error:
//
// template<typename T> struct C20 { friend void func_20() {} };
// C20<int> c20i;
// void func_20() {}
//
for (auto I : FD->redecls()) {
if (I != FD && !I->isInvalidDecl() &&
I->getFriendObjectKind() != Decl::FOK_None) {
if (FunctionDecl *Original = I->getInstantiatedFromMemberFunction()) {
if (FunctionDecl *OrigFD = FD->getInstantiatedFromMemberFunction()) {
// A merged copy of the same function, instantiated as a member of
// the same class, is OK.
if (declaresSameEntity(OrigFD, Original) &&
declaresSameEntity(cast<Decl>(I->getLexicalDeclContext()),
cast<Decl>(FD->getLexicalDeclContext())))
continue;
}
if (Original->isThisDeclarationADefinition()) {
Definition = I;
break;
}
}
}
}
}
if (!Definition)
return;
if (canRedefineFunction(Definition, getLangOpts()))
return;
// Don't emit an error when this is redefinition of a typo-corrected
// definition.
if (TypoCorrectedFunctionDefinitions.count(Definition))
return;
// If we don't have a visible definition of the function, and it's inline or
// a template, skip the new definition.
if (SkipBody && !hasVisibleDefinition(Definition) &&
(Definition->getFormalLinkage() == InternalLinkage ||
Definition->isInlined() ||
Definition->getDescribedFunctionTemplate() ||
Definition->getNumTemplateParameterLists())) {
SkipBody->ShouldSkip = true;
if (auto *TD = Definition->getDescribedFunctionTemplate())
makeMergedDefinitionVisible(TD);
makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition));
return;
}
if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
Definition->getStorageClass() == SC_Extern)
Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
<< FD->getDeclName() << getLangOpts().CPlusPlus;
else
Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
Diag(Definition->getLocation(), diag::note_previous_definition);
FD->setInvalidDecl();
}
static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
Sema &S) {
CXXRecordDecl *const LambdaClass = CallOperator->getParent();
LambdaScopeInfo *LSI = S.PushLambdaScope();
LSI->CallOperator = CallOperator;
LSI->Lambda = LambdaClass;
LSI->ReturnType = CallOperator->getReturnType();
const LambdaCaptureDefault LCD = LambdaClass->getLambdaCaptureDefault();
if (LCD == LCD_None)
LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_None;
else if (LCD == LCD_ByCopy)
LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByval;
else if (LCD == LCD_ByRef)
LSI->ImpCaptureStyle = CapturingScopeInfo::ImpCap_LambdaByref;
DeclarationNameInfo DNI = CallOperator->getNameInfo();
LSI->IntroducerRange = DNI.getCXXOperatorNameRange();
LSI->Mutable = !CallOperator->isConst();
// Add the captures to the LSI so they can be noted as already
// captured within tryCaptureVar.
auto I = LambdaClass->field_begin();
for (const auto &C : LambdaClass->captures()) {
if (C.capturesVariable()) {
VarDecl *VD = C.getCapturedVar();
if (VD->isInitCapture())
S.CurrentInstantiationScope->InstantiatedLocal(VD, VD);
QualType CaptureType = VD->getType();
const bool ByRef = C.getCaptureKind() == LCK_ByRef;
LSI->addCapture(VD, /*IsBlock*/false, ByRef,
/*RefersToEnclosingVariableOrCapture*/true, C.getLocation(),
/*EllipsisLoc*/C.isPackExpansion()
? C.getEllipsisLoc() : SourceLocation(),
CaptureType, /*Expr*/ nullptr);
} else if (C.capturesThis()) {
LSI->addThisCapture(/*Nested*/ false, C.getLocation(),
/*Expr*/ nullptr,
C.getCaptureKind() == LCK_StarThis);
} else {
LSI->addVLATypeCapture(C.getLocation(), I->getType());
}
++I;
}
}
Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
SkipBodyInfo *SkipBody) {
if (!D) {
// Parsing the function declaration failed in some way. Push on a fake scope
// anyway so we can try to parse the function body.
PushFunctionScope();
return D;
}
FunctionDecl *FD = nullptr;
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
FD = FunTmpl->getTemplatedDecl();
else
FD = cast<FunctionDecl>(D);
// Check for defining attributes before the check for redefinition.
if (const auto *Attr = FD->getAttr<AliasAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 0;
FD->dropAttr<AliasAttr>();
FD->setInvalidDecl();
}
if (const auto *Attr = FD->getAttr<IFuncAttr>()) {
Diag(Attr->getLocation(), diag::err_alias_is_definition) << FD << 1;
FD->dropAttr<IFuncAttr>();
FD->setInvalidDecl();
}
// See if this is a redefinition. If 'will have body' is already set, then
// these checks were already performed when it was set.
if (!FD->willHaveBody() && !FD->isLateTemplateParsed()) {
CheckForFunctionRedefinition(FD, nullptr, SkipBody);
// If we're skipping the body, we're done. Don't enter the scope.
if (SkipBody && SkipBody->ShouldSkip)
return D;
}
// Mark this function as "will have a body eventually". This lets users
// call e.g. isInlineDefinitionExternallyVisible while we're still parsing
// this function.
FD->setWillHaveBody();
// If we are instantiating a generic lambda call operator, push
// a LambdaScopeInfo onto the function stack. But use the information
// that's already been calculated (ActOnLambdaExpr) to prime the current
// LambdaScopeInfo.
// When the template operator is being specialized, the LambdaScopeInfo
// has to be properly restored so that tryCaptureVariable doesn't try
// to capture any new variables. In addition, when calculating potential
// captures during transformation of nested lambdas, it is necessary to
// have the LSI properly restored.
if (isGenericLambdaCallOperatorSpecialization(FD)) {
assert(inTemplateInstantiation() &&
"There should be an active template instantiation on the stack "
"when instantiating a generic lambda!");
RebuildLambdaScopeInfo(cast<CXXMethodDecl>(D), *this);
} else {
// Enter a new function scope
PushFunctionScope();
}
// Builtin functions cannot be defined.
if (unsigned BuiltinID = FD->getBuiltinID()) {
if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID) &&
!Context.BuiltinInfo.isPredefinedRuntimeFunction(BuiltinID)) {
Diag(FD->getLocation(), diag::err_builtin_definition) << FD;
FD->setInvalidDecl();
}
}
// The return type of a function definition must be complete
// (C99 6.9.1p3, C++ [dcl.fct]p6).
QualType ResultType = FD->getReturnType();
if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
!FD->isInvalidDecl() &&
RequireCompleteType(FD->getLocation(), ResultType,
diag::err_func_def_incomplete_result))
FD->setInvalidDecl();
if (FnBodyScope)
PushDeclContext(FnBodyScope, FD);
// Check the validity of our function parameters
CheckParmsForFunctionDef(FD->parameters(),
/*CheckParameterNames=*/true);
// Add non-parameter declarations already in the function to the current
// scope.
if (FnBodyScope) {
for (Decl *NPD : FD->decls()) {
auto *NonParmDecl = dyn_cast<NamedDecl>(NPD);
if (!NonParmDecl)
continue;
assert(!isa<ParmVarDecl>(NonParmDecl) &&
"parameters should not be in newly created FD yet");
// If the decl has a name, make it accessible in the current scope.
if (NonParmDecl->getDeclName())
PushOnScopeChains(NonParmDecl, FnBodyScope, /*AddToContext=*/false);
// Similarly, dive into enums and fish their constants out, making them
// accessible in this scope.
if (auto *ED = dyn_cast<EnumDecl>(NonParmDecl)) {
for (auto *EI : ED->enumerators())
PushOnScopeChains(EI, FnBodyScope, /*AddToContext=*/false);
}
}
}
// Introduce our parameters into the function scope
for (auto Param : FD->parameters()) {
Param->setOwningFunction(FD);
// If this has an identifier, add it to the scope stack.
if (Param->getIdentifier() && FnBodyScope) {
CheckShadow(FnBodyScope, Param);
PushOnScopeChains(Param, FnBodyScope);
}
}
// Ensure that the function's exception specification is instantiated.
if (const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>())
ResolveExceptionSpec(D->getLocation(), FPT);
// dllimport cannot be applied to non-inline function definitions.
if (FD->hasAttr<DLLImportAttr>() && !FD->isInlined() &&
!FD->isTemplateInstantiation()) {
assert(!FD->hasAttr<DLLExportAttr>());
Diag(FD->getLocation(), diag::err_attribute_dllimport_function_definition);
FD->setInvalidDecl();
return D;
}
// We want to attach documentation to the original Decl (which might be
// a function template).
ActOnDocumentableDecl(D);
if (getCurLexicalContext()->isObjCContainer() &&
getCurLexicalContext()->getDeclKind() != Decl::ObjCCategoryImpl &&
getCurLexicalContext()->getDeclKind() != Decl::ObjCImplementation)
Diag(FD->getLocation(), diag::warn_function_def_in_objc_container);
return D;
}
/// Given the set of return statements within a function body,
/// compute the variables that are subject to the named return value
/// optimization.
///
/// Each of the variables that is subject to the named return value
/// optimization will be marked as NRVO variables in the AST, and any
/// return statement that has a marked NRVO variable as its NRVO candidate can
/// use the named return value optimization.
///
/// This function applies a very simplistic algorithm for NRVO: if every return
/// statement in the scope of a variable has the same NRVO candidate, that
/// candidate is an NRVO variable.
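///
/// Illustrative example (not from the original comment):
/// \code
///   X f(bool B) {
///     X Result;          // NRVO candidate of both return statements
///     if (B) return Result;
///     return Result;     // same candidate, so Result is an NRVO variable
///   }
/// \endcode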
void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
ReturnStmt **Returns = Scope->Returns.data();
for (unsigned I = 0, E = Scope->Returns.size(); I != E; ++I) {
if (const VarDecl *NRVOCandidate = Returns[I]->getNRVOCandidate()) {
if (!NRVOCandidate->isNRVOVariable())
Returns[I]->setNRVOCandidate(nullptr);
}
}
}
bool Sema::canDelayFunctionBody(const Declarator &D) {
// We can't delay parsing the body of a constexpr function template (yet).
if (D.getDeclSpec().isConstexprSpecified())
return false;
// We can't delay parsing the body of a function template with a deduced
// return type (yet).
if (D.getDeclSpec().hasAutoTypeSpec()) {
// If the placeholder introduces a non-deduced trailing return type,
// we can still delay parsing it.
if (D.getNumTypeObjects()) {
const auto &Outer = D.getTypeObject(D.getNumTypeObjects() - 1);
if (Outer.Kind == DeclaratorChunk::Function &&
Outer.Fun.hasTrailingReturnType()) {
QualType Ty = GetTypeFromParser(Outer.Fun.getTrailingReturnType());
return Ty.isNull() || !Ty->isUndeducedType();
}
}
return false;
}
return true;
}
bool Sema::canSkipFunctionBody(Decl *D) {
// We cannot skip the body of a function (or function template) which is
// constexpr, since we may need to evaluate its body in order to parse the
// rest of the file.
// We cannot skip the body of a function with an undeduced return type,
// because any callers of that function need to know the type.
if (const FunctionDecl *FD = D->getAsFunction()) {
if (FD->isConstexpr())
return false;
// We can't simply call Type::isUndeducedType here, because inside a template
// auto can be deduced to a dependent type, which is not considered
// "undeduced".
if (FD->getReturnType()->getContainedDeducedType())
return false;
}
return Consumer.shouldSkipFunctionBody(D);
}
Decl *Sema::ActOnSkippedFunctionBody(Decl *Decl) {
if (!Decl)
return nullptr;
if (FunctionDecl *FD = Decl->getAsFunction())
FD->setHasSkippedBody();
else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(Decl))
MD->setHasSkippedBody();
return Decl;
}
Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
return ActOnFinishFunctionBody(D, BodyArg, false);
}
Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
bool IsInstantiation) {
FunctionDecl *FD = dcl ? dcl->getAsFunction() : nullptr;
sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
if (getLangOpts().CoroutinesTS && getCurFunction()->isCoroutine())
CheckCompletedCoroutineBody(FD, Body);
if (FD) {
FD->setBody(Body);
FD->setWillHaveBody(false);
if (getLangOpts().CPlusPlus14) {
if (!FD->isInvalidDecl() && Body && !FD->isDependentContext() &&
FD->getReturnType()->isUndeducedType()) {
// If the function has a deduced result type but contains no 'return'
// statements, the result type as written must be exactly 'auto', and
// the deduced result type is 'void'.
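// Illustrative examples (not from the original source):
//   auto f() {}  // OK, return type deduced as 'void'
//   auto *g() {} // error: cannot deduce 'auto *' with no return statements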
if (!FD->getReturnType()->getAs<AutoType>()) {
Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
<< FD->getReturnType();
FD->setInvalidDecl();
} else {
// Substitute 'void' for the 'auto' in the type.
TypeLoc ResultType = getReturnTypeLoc(FD);
Context.adjustDeducedFunctionResultType(
FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
}
}
} else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
// In C++11, we don't use 'auto' deduction rules for lambda call
// operators because we don't support return type deduction.
auto *LSI = getCurLambda();
if (LSI->HasImplicitReturnType) {
deduceClosureReturnType(*LSI);
// C++11 [expr.prim.lambda]p4:
// [...] if there are no return statements in the compound-statement
// [the deduced type is] the type void
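// Illustrative example (not from the original source): for '[] {}' the
// call operator's return type is deduced as 'void' here.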
QualType RetType =
LSI->ReturnType.isNull() ? Context.VoidTy : LSI->ReturnType;
// Update the return type to the deduced type.
const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>();
FD->setType(Context.getFunctionType(RetType, Proto->getParamTypes(),
Proto->getExtProtoInfo()));
}
}
// If the function implicitly returns zero (like 'main') or is naked,
// don't complain about missing return statements.
if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
WP.disableCheckFallThrough();
// MSVC permits the use of a pure specifier (= 0) on a function definition
// at class scope; warn about this non-standard construct.
if (getLangOpts().MicrosoftExt && FD->isPure() && FD->isCanonicalDecl())
Diag(FD->getLocation(), diag::ext_pure_function_definition);
if (!FD->isInvalidDecl()) {
// Don't diagnose unused parameters of defaulted or deleted functions.
if (!FD->isDeleted() && !FD->isDefaulted())
DiagnoseUnusedParameters(FD->parameters());
DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
FD->getReturnType(), FD);
// If this is a structor, we need a vtable.
if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
MarkVTableUsed(FD->getLocation(), Constructor->getParent());
else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(FD))
MarkVTableUsed(FD->getLocation(), Destructor->getParent());
// Try to apply the named return value optimization. We have to check
// if we can do this here because lambdas keep return statements around
// to deduce an implicit return type.
if (FD->getReturnType()->isRecordType() &&
(!getLangOpts().CPlusPlus || !FD->isDependentContext()))
computeNRVO(Body, getCurFunction());
}
// GNU warning -Wmissing-prototypes:
// Warn if a global function is defined without a previous
// prototype declaration. This warning is issued even if the
// definition itself provides a prototype. The aim is to detect
// global functions that fail to be declared in header files.
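// Illustrative example (not from the original source): at file scope,
//   int f(int x) { return x; }
// warns under -Wmissing-prototypes unless 'int f(int);' was seen first.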
const FunctionDecl *PossibleZeroParamPrototype = nullptr;
if (ShouldWarnAboutMissingPrototype(FD, PossibleZeroParamPrototype)) {
Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
if (PossibleZeroParamPrototype) {
// We found a declaration that is not a prototype,
// but that could be a zero-parameter prototype
if (TypeSourceInfo *TI =
PossibleZeroParamPrototype->getTypeSourceInfo()) {
TypeLoc TL = TI->getTypeLoc();
if (FunctionNoProtoTypeLoc FTL = TL.getAs<FunctionNoProtoTypeLoc>())
Diag(PossibleZeroParamPrototype->getLocation(),
diag::note_declaration_not_a_prototype)
<< PossibleZeroParamPrototype
<< FixItHint::CreateInsertion(FTL.getRParenLoc(), "void");
}
}
// GNU warning -Wstrict-prototypes
// Warn if K&R function is defined without a previous declaration.
// This warning is issued only if the definition itself does not provide
// a prototype. Only K&R definitions do not provide a prototype.
// An empty list in a function declarator that is part of a definition
// of that function specifies that the function has no parameters
// (C99 6.7.5.3p14)
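// Illustrative example (not from the original source):
//   int f(a) int a; { return a; } // K&R definition, warns here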
if (!FD->hasWrittenPrototype() && FD->getNumParams() > 0 &&
!LangOpts.CPlusPlus) {
TypeSourceInfo *TI = FD->getTypeSourceInfo();
TypeLoc TL = TI->getTypeLoc();
FunctionTypeLoc FTL = TL.getAsAdjusted<FunctionTypeLoc>();
Diag(FTL.getLParenLoc(), diag::warn_strict_prototypes) << 2;
}
}
// Warn on CPUDispatch with an actual body.
if (FD->isMultiVersion() && FD->hasAttr<CPUDispatchAttr>() && Body)
if (const auto *CmpndBody = dyn_cast<CompoundStmt>(Body))
if (!CmpndBody->body_empty())
Diag(CmpndBody->body_front()->getLocStart(),
diag::warn_dispatch_body_ignored);
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
const CXXMethodDecl *KeyFunction;
if (MD->isOutOfLine() && (MD = MD->getCanonicalDecl()) &&
MD->isVirtual() &&
(KeyFunction = Context.getCurrentKeyFunction(MD->getParent())) &&
MD == KeyFunction->getCanonicalDecl()) {
// Update the key-function state if necessary for this ABI.
if (FD->isInlined() &&
!Context.getTargetInfo().getCXXABI().canKeyFunctionBeInline()) {
Context.setNonKeyFunction(MD);
// If the newly-chosen key function is already defined, then we
// need to mark the vtable as used retroactively.
KeyFunction = Context.getCurrentKeyFunction(MD->getParent());
const FunctionDecl *Definition;
if (KeyFunction && KeyFunction->isDefined(Definition))
MarkVTableUsed(Definition->getLocation(), MD->getParent(), true);
} else {
// We just defined the key function; mark the vtable as used.
MarkVTableUsed(FD->getLocation(), MD->getParent(), true);
}
}
}
assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
"Function parsing confused");
} else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
assert(MD == getCurMethodDecl() && "Method parsing confused");
MD->setBody(Body);
if (!MD->isInvalidDecl()) {
DiagnoseUnusedParameters(MD->parameters());
DiagnoseSizeOfParametersAndReturnValue(MD->parameters(),
MD->getReturnType(), MD);
if (Body)
computeNRVO(Body, getCurFunction());
}
if (getCurFunction()->ObjCShouldCallSuper) {
Diag(MD->getLocEnd(), diag::warn_objc_missing_super_call)
<< MD->getSelector().getAsString();
getCurFunction()->ObjCShouldCallSuper = false;
}
if (getCurFunction()->ObjCWarnForNoDesignatedInitChain) {
const ObjCMethodDecl *InitMethod = nullptr;
bool isDesignated =
MD->isDesignatedInitializerForTheInterface(&InitMethod);
assert(isDesignated && InitMethod);
(void)isDesignated;
auto superIsNSObject = [&](const ObjCMethodDecl *MD) {
auto IFace = MD->getClassInterface();
if (!IFace)
return false;
auto SuperD = IFace->getSuperClass();
if (!SuperD)
return false;
return SuperD->getIdentifier() ==
NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject);
};
// Don't issue this warning for unavailable inits or direct subclasses
// of NSObject.
if (!MD->isUnavailable() && !superIsNSObject(MD)) {
Diag(MD->getLocation(),
diag::warn_objc_designated_init_missing_super_call);
Diag(InitMethod->getLocation(),
diag::note_objc_designated_init_marked_here);
}
getCurFunction()->ObjCWarnForNoDesignatedInitChain = false;
}
if (getCurFunction()->ObjCWarnForNoInitDelegation) {
// Don't issue this warning for unavailable inits.
if (!MD->isUnavailable())
Diag(MD->getLocation(),
diag::warn_objc_secondary_init_missing_init_call);
getCurFunction()->ObjCWarnForNoInitDelegation = false;
}
} else {
// Parsing the function declaration failed in some way. Pop the fake scope
// we pushed on.
PopFunctionScopeInfo(ActivePolicy, dcl);
return nullptr;
}
if (Body && getCurFunction()->HasPotentialAvailabilityViolations)
DiagnoseUnguardedAvailabilityViolations(dcl);
assert(!getCurFunction()->ObjCShouldCallSuper &&
"This should only be set for ObjC methods, which should have been "
"handled in the block above.");
// Verify and clean out per-function state.
if (Body && (!FD || !FD->isDefaulted())) {
// C++ constructors that have function-try-blocks can't have return
// statements in the handlers of that block. (C++ [except.handle]p14)
// Verify this.
if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
// Verify that gotos and switch cases don't jump into scopes illegally.
if (getCurFunction()->NeedsScopeChecking() &&
!PP.isCodeCompletionEnabled())
DiagnoseInvalidJumps(Body);
if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
if (!Destructor->getParent()->isDependentType())
CheckDestructor(Destructor);
MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
Destructor->getParent());
}
// If any errors have occurred, clear out any temporaries that may have
// been left over. This ensures that these temporaries won't be picked up for
// deletion in some later function.
if (getDiagnostics().hasErrorOccurred() ||
getDiagnostics().getSuppressAllDiagnostics()) {
DiscardCleanupsInEvaluationContext();
}
if (!getDiagnostics().hasUncompilableErrorOccurred() &&
!isa<FunctionTemplateDecl>(dcl)) {
// Since the body is valid, issue any analysis-based warnings that are
// enabled.
ActivePolicy = &WP;
}
if (!IsInstantiation && FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
(!CheckConstexprFunctionDecl(FD) ||
!CheckConstexprFunctionBody(FD, Body)))
FD->setInvalidDecl();
if (FD && FD->hasAttr<NakedAttr>()) {
for (const Stmt *S : Body->children()) {
// Allow local register variables without initializer as they don't
// require prologue.
bool RegisterVariables = false;
if (auto *DS = dyn_cast<DeclStmt>(S)) {
for (const auto *Decl : DS->decls()) {
if (const auto *Var = dyn_cast<VarDecl>(Decl)) {
RegisterVariables =
Var->hasAttr<AsmLabelAttr>() && !Var->hasInit();
if (!RegisterVariables)
break;
}
}
}
if (RegisterVariables)
continue;
if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
Diag(S->getLocStart(), diag::err_non_asm_stmt_in_naked_function);
Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
FD->setInvalidDecl();
break;
}
}
}
assert(ExprCleanupObjects.size() ==
ExprEvalContexts.back().NumCleanupObjects &&
"Leftover temporaries in function");
assert(!Cleanup.exprNeedsCleanups() && "Unaccounted cleanups in function");
assert(MaybeODRUseExprs.empty() &&
"Leftover expressions for odr-use checking");
}
if (!IsInstantiation)
PopDeclContext();
PopFunctionScopeInfo(ActivePolicy, dcl);
// If any errors have occurred, clear out any temporaries that may have
// been left over. This ensures that these temporaries won't be picked up for
// deletion in some later function.
if (getDiagnostics().hasErrorOccurred()) {
DiscardCleanupsInEvaluationContext();
}
return dcl;
}
/// When we finish delayed parsing of an attribute, we must attach it to the
/// relevant Decl.
void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
ParsedAttributes &Attrs) {
// Always attach attributes to the underlying decl.
if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
D = TD->getTemplatedDecl();
ProcessDeclAttributeList(S, D, Attrs);
if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(D))
if (Method->isStatic())
checkThisInStaticMemberFunctionAttributes(Method);
}
/// ImplicitlyDefineFunction - An undeclared identifier was used in a function
/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
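/// For example (illustrative, not from the original comment): in C89,
/// calling 'foo(42);' with no prior declaration of 'foo' implicitly
/// declares 'extern int foo();' at block scope.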
NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
IdentifierInfo &II, Scope *S) {
// Find the scope in which the identifier is injected and the corresponding
// DeclContext.
// FIXME: C89 does not say what happens if there is no enclosing block scope.
// In that case, we inject the declaration into the translation unit scope
// instead.
Scope *BlockScope = S;
while (!BlockScope->isCompoundStmtScope() && BlockScope->getParent())
BlockScope = BlockScope->getParent();
Scope *ContextScope = BlockScope;
while (!ContextScope->getEntity())
ContextScope = ContextScope->getParent();
ContextRAII SavedContext(*this, ContextScope->getEntity());
// Before we produce a declaration for an implicitly defined
// function, see whether there was a locally-scoped declaration of
// this name as a function or variable. If so, use that
// (non-visible) declaration, and complain about it.
NamedDecl *ExternCPrev = findLocallyScopedExternCDecl(&II);
if (ExternCPrev) {
// We still need to inject the function into the enclosing block scope so
// that later (non-call) uses can see it.
PushOnScopeChains(ExternCPrev, BlockScope, /*AddToContext*/false);
// C89 footnote 38:
// If in fact it is not defined as having type "function returning int",
// the behavior is undefined.
if (!isa<FunctionDecl>(ExternCPrev) ||
!Context.typesAreCompatible(
cast<FunctionDecl>(ExternCPrev)->getType(),
Context.getFunctionNoProtoType(Context.IntTy))) {
Diag(Loc, diag::ext_use_out_of_scope_declaration)
<< ExternCPrev << !getLangOpts().C99;
Diag(ExternCPrev->getLocation(), diag::note_previous_declaration);
return ExternCPrev;
}
}
// Implicitly declaring a function is an extension in C99; it is legal in
// C90, but we warn about it there as well.
// OpenCL v2.0 s6.9.u - Implicit function declaration is not supported.
unsigned diag_id;
if (II.getName().startswith("__builtin_"))
diag_id = diag::warn_builtin_unknown;
else if (getLangOpts().C99 || getLangOpts().OpenCL)
diag_id = diag::ext_implicit_function_decl;
else
diag_id = diag::warn_implicit_function_decl;
Diag(Loc, diag_id) << &II << getLangOpts().OpenCL;
// If we found a prior declaration of this function, don't bother building
// another one. We've already pushed that one into scope, so there's nothing
// more to do.
if (ExternCPrev)
return ExternCPrev;
// Because typo correction is expensive, only do it if the implicit
// function declaration is going to be treated as an error.
if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
TypoCorrection Corrected;
if (S &&
(Corrected = CorrectTypo(
DeclarationNameInfo(&II, Loc), LookupOrdinaryName, S, nullptr,
llvm::make_unique<DeclFilterCCC<FunctionDecl>>(), CTK_NonError)))
diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
/*ErrorRecovery*/false);
}
// Set a Declarator for the implicit definition: int foo();
const char *Dummy;
AttributeFactory attrFactory;
DeclSpec DS(attrFactory);
unsigned DiagID;
bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID,
Context.getPrintingPolicy());
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
SourceLocation NoLoc;
Declarator D(DS, DeclaratorContext::BlockContext);
D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
/*Params=*/nullptr,
/*NumParams=*/0,
/*EllipsisLoc=*/NoLoc,
/*RParenLoc=*/NoLoc,
/*TypeQuals=*/0,
/*RefQualifierIsLvalueRef=*/true,
/*RefQualifierLoc=*/NoLoc,
/*ConstQualifierLoc=*/NoLoc,
/*VolatileQualifierLoc=*/NoLoc,
/*RestrictQualifierLoc=*/NoLoc,
/*MutableLoc=*/NoLoc, EST_None,
/*ESpecRange=*/SourceRange(),
/*Exceptions=*/nullptr,
/*ExceptionRanges=*/nullptr,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
/*DeclsInPrototype=*/None, Loc,
Loc, D),
std::move(DS.getAttributes()), SourceLocation());
D.SetIdentifier(&II, Loc);
// Insert this function into the enclosing block scope.
FunctionDecl *FD = cast<FunctionDecl>(ActOnDeclarator(BlockScope, D));
FD->setImplicit();
AddKnownFunctionAttributes(FD);
return FD;
}
/// Adds any function attributes that we know a priori based on
/// the declaration of this function.
///
/// These attributes can apply both to implicitly-declared builtins
/// (like __builtin___printf_chk) or to library-declared functions
/// like NSLog or printf.
///
/// We need to check for duplicate attributes both here and where user-written
/// attributes are applied to declarations.
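/// For example (illustrative, not from the original comment): a
/// declaration of 'printf' receives an implicit
/// __attribute__((format(printf, 1, 2))).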
void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
if (FD->isInvalidDecl())
return;
// If this is a built-in function, map its builtin attributes to
// actual attributes.
if (unsigned BuiltinID = FD->getBuiltinID()) {
// Handle printf-formatting attributes.
unsigned FormatIdx;
bool HasVAListArg;
if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
if (!FD->hasAttr<FormatAttr>()) {
const char *fmt = "printf";
unsigned int NumParams = FD->getNumParams();
if (FormatIdx < NumParams && // NumParams may be 0 (e.g. vfprintf)
FD->getParamDecl(FormatIdx)->getType()->isObjCObjectPointerType())
fmt = "NSString";
FD->addAttr(FormatAttr::CreateImplicit(Context,
&Context.Idents.get(fmt),
FormatIdx+1,
HasVAListArg ? 0 : FormatIdx+2,
FD->getLocation()));
}
}
if (Context.BuiltinInfo.isScanfLike(BuiltinID, FormatIdx,
HasVAListArg)) {
if (!FD->hasAttr<FormatAttr>())
FD->addAttr(FormatAttr::CreateImplicit(Context,
&Context.Idents.get("scanf"),
FormatIdx+1,
HasVAListArg ? 0 : FormatIdx+2,
FD->getLocation()));
}
// Mark const if we don't care about errno and that is the only thing
// preventing the function from being const. This allows IRgen to use LLVM
// intrinsics for such functions.
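// Illustrative example (not from the original source): with
// -fno-math-errno, a call to 'sqrt' can be lowered to the llvm.sqrt
// intrinsic because the function is marked const here.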
if (!getLangOpts().MathErrno && !FD->hasAttr<ConstAttr>() &&
Context.BuiltinInfo.isConstWithoutErrno(BuiltinID))
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
// We make "fma" on some platforms const because we know it does not set
// errno in those environments even though it could set errno based on the
// C standard.
const llvm::Triple &Trip = Context.getTargetInfo().getTriple();
if ((Trip.isGNUEnvironment() || Trip.isAndroid() || Trip.isOSMSVCRT()) &&
!FD->hasAttr<ConstAttr>()) {
switch (BuiltinID) {
case Builtin::BI__builtin_fma:
case Builtin::BI__builtin_fmaf:
case Builtin::BI__builtin_fmal:
case Builtin::BIfma:
case Builtin::BIfmaf:
case Builtin::BIfmal:
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
break;
default:
break;
}
}
if (Context.BuiltinInfo.isReturnsTwice(BuiltinID) &&
!FD->hasAttr<ReturnsTwiceAttr>())
FD->addAttr(ReturnsTwiceAttr::CreateImplicit(Context,
FD->getLocation()));
if (Context.BuiltinInfo.isNoThrow(BuiltinID) && !FD->hasAttr<NoThrowAttr>())
FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
if (Context.BuiltinInfo.isPure(BuiltinID) && !FD->hasAttr<PureAttr>())
FD->addAttr(PureAttr::CreateImplicit(Context, FD->getLocation()));
if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->hasAttr<ConstAttr>())
FD->addAttr(ConstAttr::CreateImplicit(Context, FD->getLocation()));
if (getLangOpts().CUDA && Context.BuiltinInfo.isTSBuiltin(BuiltinID) &&
!FD->hasAttr<CUDADeviceAttr>() && !FD->hasAttr<CUDAHostAttr>()) {
// Add the appropriate attribute, depending on the CUDA compilation mode
// and which target the builtin belongs to. For example, during host
// compilation, aux builtins are __device__, while the rest are __host__.
if (getLangOpts().CUDAIsDevice !=
Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
FD->addAttr(CUDADeviceAttr::CreateImplicit(Context, FD->getLocation()));
else
FD->addAttr(CUDAHostAttr::CreateImplicit(Context, FD->getLocation()));
}
}
// If C++ exceptions are enabled but we are told extern "C" functions cannot
// throw, add an implicit nothrow attribute to any extern "C" function we come
// across.
if (getLangOpts().CXXExceptions && getLangOpts().ExternCNoUnwind &&
FD->isExternC() && !FD->hasAttr<NoThrowAttr>()) {
const auto *FPT = FD->getType()->getAs<FunctionProtoType>();
if (!FPT || FPT->getExceptionSpecType() == EST_None)
FD->addAttr(NoThrowAttr::CreateImplicit(Context, FD->getLocation()));
}
IdentifierInfo *Name = FD->getIdentifier();
if (!Name)
return;
if ((!getLangOpts().CPlusPlus &&
FD->getDeclContext()->isTranslationUnit()) ||
(isa<LinkageSpecDecl>(FD->getDeclContext()) &&
cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
LinkageSpecDecl::lang_c)) {
// Okay: this could be a libc/libm/Objective-C function we know
// about.
} else
return;
if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
// FIXME: asprintf and vasprintf aren't C99 functions. Should they be
// target-specific builtins, perhaps?
if (!FD->hasAttr<FormatAttr>())
FD->addAttr(FormatAttr::CreateImplicit(Context,
&Context.Idents.get("printf"), 2,
Name->isStr("vasprintf") ? 0 : 3,
FD->getLocation()));
}
if (Name->isStr("__CFStringMakeConstantString")) {
// We already have a __builtin___CFStringMakeConstantString,
// but builds that use -fno-constant-cfstrings don't go through that.
if (!FD->hasAttr<FormatArgAttr>())
FD->addAttr(FormatArgAttr::CreateImplicit(Context, ParamIdx(1, FD),
FD->getLocation()));
}
}
TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo) {
assert(D.getIdentifier() && "Wrong callback for declspec without declarator");
assert(!T.isNull() && "GetTypeForDeclarator() returned null type");
if (!TInfo) {
assert(D.isInvalidType() && "no declarator info for valid type");
TInfo = Context.getTrivialTypeSourceInfo(T);
}
// Scope manipulation handled by caller.
TypedefDecl *NewTD = TypedefDecl::Create(Context, CurContext,
D.getLocStart(),
D.getIdentifierLoc(),
D.getIdentifier(),
TInfo);
// Bail out immediately if we have an invalid declaration.
if (D.isInvalidType()) {
NewTD->setInvalidDecl();
return NewTD;
}
if (D.getDeclSpec().isModulePrivateSpecified()) {
if (CurContext->isFunctionOrMethod())
Diag(NewTD->getLocation(), diag::err_module_private_local)
<< 2 << NewTD->getDeclName()
<< SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
<< FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
else
NewTD->setModulePrivate();
}
// C++ [dcl.typedef]p8:
// If the typedef declaration defines an unnamed class (or
// enum), the first typedef-name declared by the declaration
// to be that class type (or enum type) is used to denote the
// class type (or enum type) for linkage purposes only.
// We need to check whether the type was declared in the declaration.
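// Illustrative example (not from the original source):
//   typedef struct { int X; } S; // 'S' denotes the struct for linkage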
switch (D.getDeclSpec().getTypeSpecType()) {
case TST_enum:
case TST_struct:
case TST_interface:
case TST_union:
case TST_class: {
TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
setTagNameForLinkagePurposes(tagFromDeclSpec, NewTD);
break;
}
default:
break;
}
return NewTD;
}
/// Check that this is a valid underlying type for an enum declaration.
bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
QualType T = TI->getType();
if (T->isDependentType())
return false;
if (const BuiltinType *BT = T->getAs<BuiltinType>())
if (BT->isInteger())
return false;
Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
return true;
}
/// Check whether this is a valid redeclaration of a previous enumeration.
/// \return true if the redeclaration was invalid.
bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev) {
if (IsScoped != Prev->isScoped()) {
Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
<< Prev->isScoped();
Diag(Prev->getLocation(), diag::note_previous_declaration);
return true;
}
if (IsFixed && Prev->isFixed()) {
if (!EnumUnderlyingTy->isDependentType() &&
!Prev->getIntegerType()->isDependentType() &&
!Context.hasSameUnqualifiedType(EnumUnderlyingTy,
Prev->getIntegerType())) {
// TODO: Highlight the underlying type of the redeclaration.
Diag(EnumLoc, diag::err_enum_redeclare_type_mismatch)
<< EnumUnderlyingTy << Prev->getIntegerType();
Diag(Prev->getLocation(), diag::note_previous_declaration)
<< Prev->getIntegerTypeRange();
return true;
}
} else if (IsFixed != Prev->isFixed()) {
Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
<< Prev->isFixed();
Diag(Prev->getLocation(), diag::note_previous_declaration);
return true;
}
return false;
}
/// Get diagnostic %select index for tag kind for
/// redeclaration diagnostic message.
/// WARNING: Indexes apply to particular diagnostics only!
///
/// \returns diagnostic %select index.
static unsigned getRedeclDiagFromTagKind(TagTypeKind Tag) {
switch (Tag) {
case TTK_Struct: return 0;
case TTK_Interface: return 1;
case TTK_Class: return 2;
default: llvm_unreachable("Invalid tag kind for redecl diagnostic!");
}
}
/// Determine if tag kind is a class-key compatible with
/// class for redeclaration (class, struct, or __interface).
///
/// \returns true iff the tag kind is compatible.
static bool isClassCompatTagKind(TagTypeKind Tag)
{
return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface;
}
Sema::NonTagKind Sema::getNonTagTypeDeclKind(const Decl *PrevDecl,
TagTypeKind TTK) {
if (isa<TypedefDecl>(PrevDecl))
return NTK_Typedef;
else if (isa<TypeAliasDecl>(PrevDecl))
return NTK_TypeAlias;
else if (isa<ClassTemplateDecl>(PrevDecl))
return NTK_Template;
else if (isa<TypeAliasTemplateDecl>(PrevDecl))
return NTK_TypeAliasTemplate;
else if (isa<TemplateTemplateParmDecl>(PrevDecl))
return NTK_TemplateTemplateArgument;
switch (TTK) {
case TTK_Struct:
case TTK_Interface:
case TTK_Class:
return getLangOpts().CPlusPlus ? NTK_NonClass : NTK_NonStruct;
case TTK_Union:
return NTK_NonUnion;
case TTK_Enum:
return NTK_NonEnum;
}
llvm_unreachable("invalid TTK");
}
/// Determine whether a tag with a given kind is acceptable
/// as a redeclaration of the given tag declaration.
///
/// \returns true if the new tag kind is acceptable, false otherwise.
bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name) {
// C++ [dcl.type.elab]p3:
// The class-key or enum keyword present in the
// elaborated-type-specifier shall agree in kind with the
// declaration to which the name in the elaborated-type-specifier
// refers. This rule also applies to the form of
// elaborated-type-specifier that declares a class-name or
// friend class since it can be construed as referring to the
// definition of the class. Thus, in any
// elaborated-type-specifier, the enum keyword shall be used to
// refer to an enumeration (7.2), the union class-key shall be
// used to refer to a union (clause 9), and either the class or
// struct class-key shall be used to refer to a class (clause 9)
// declared using the class or struct class-key.
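// Illustrative examples (not from the original source):
//   union U; struct U u;  // error: tag kind does not match
//   class C; struct C c;  // accepted, but warns about the mismatched tag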
TagTypeKind OldTag = Previous->getTagKind();
if (!isDefinition || !isClassCompatTagKind(NewTag))
if (OldTag == NewTag)
return true;
if (isClassCompatTagKind(OldTag) && isClassCompatTagKind(NewTag)) {
// Warn about the struct/class tag mismatch.
bool isTemplate = false;
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
isTemplate = Record->getDescribedClassTemplate();
if (inTemplateInstantiation()) {
// In a template instantiation, do not offer fix-its for tag mismatches
// since they usually mess up the template instead of fixing the problem.
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
return true;
}
if (isDefinition) {
// On definitions, check previous tags and issue a fix-it for each
// one that doesn't match the current tag.
if (Previous->getDefinition()) {
// Don't suggest fix-its for redefinitions.
return true;
}
bool previousMismatch = false;
for (auto I : Previous->redecls()) {
if (I->getTagKind() != NewTag) {
if (!previousMismatch) {
previousMismatch = true;
Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(I->getTagKind());
}
Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
<< getRedeclDiagFromTagKind(NewTag)
<< FixItHint::CreateReplacement(I->getInnerLocStart(),
TypeWithKeyword::getTagTypeKindName(NewTag));
}
}
return true;
}
// Check for a previous definition. If the current tag and the definition
// are the same type, do nothing. If there is no definition but the tag
// disagrees with the previous tag type, give a warning but no fix-it.
const TagDecl *Redecl = Previous->getDefinition() ?
Previous->getDefinition() : Previous;
if (Redecl->getTagKind() == NewTag) {
return true;
}
Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
<< getRedeclDiagFromTagKind(NewTag) << isTemplate << Name
<< getRedeclDiagFromTagKind(OldTag);
Diag(Redecl->getLocation(), diag::note_previous_use);
// If there is a previous definition, suggest a fix-it.
if (Previous->getDefinition()) {
Diag(NewTagLoc, diag::note_struct_class_suggestion)
<< getRedeclDiagFromTagKind(Redecl->getTagKind())
<< FixItHint::CreateReplacement(SourceRange(NewTagLoc),
TypeWithKeyword::getTagTypeKindName(Redecl->getTagKind()));
}
return true;
}
return false;
}
/// Add a minimal nested name specifier fixit hint to allow lookup of a tag name
/// from an outer enclosing namespace or file scope inside a friend declaration.
/// This should provide the commented out code in the following snippet:
/// namespace N {
/// struct X;
/// namespace M {
/// struct Y { friend struct /*N::*/ X; };
/// }
/// }
static FixItHint createFriendTagNNSFixIt(Sema &SemaRef, NamedDecl *ND, Scope *S,
SourceLocation NameLoc) {
// While the decl is in a namespace, do repeated lookup of that name and see
// if we get the same namespace back. If we do not, continue until
// translation unit scope, at which point we have a fully qualified NNS.
SmallVector<IdentifierInfo *, 4> Namespaces;
DeclContext *DC = ND->getDeclContext()->getRedeclContext();
for (; !DC->isTranslationUnit(); DC = DC->getParent()) {
// This tag should be declared in a namespace, which can only be enclosed by
// other namespaces. Bail if there's an anonymous namespace in the chain.
NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(DC);
if (!Namespace || Namespace->isAnonymousNamespace())
return FixItHint();
IdentifierInfo *II = Namespace->getIdentifier();
Namespaces.push_back(II);
NamedDecl *Lookup = SemaRef.LookupSingleName(
S, II, NameLoc, Sema::LookupNestedNameSpecifierName);
if (Lookup == Namespace)
break;
}
// Once we have all the namespaces, reverse them to go outermost first, and
// build an NNS.
SmallString<64> Insertion;
llvm::raw_svector_ostream OS(Insertion);
if (DC->isTranslationUnit())
OS << "::";
std::reverse(Namespaces.begin(), Namespaces.end());
for (auto *II : Namespaces)
OS << II->getName() << "::";
return FixItHint::CreateInsertion(NameLoc, Insertion);
}
/// Determine whether a tag originally declared in context \p OldDC can
/// be redeclared with an unqualified name in \p NewDC (assuming name lookup
/// found a declaration in \p OldDC as a previous decl, perhaps through a
/// using-declaration).
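/// For example (a hypothetical sketch of what the MSVC-mode carve-out
/// tolerates):
///   namespace N { struct S; }
///   namespace N { namespace M { using N::S; struct S; } }
/// Here the second 'struct S' redeclares the tag found through the
/// using-declaration even though N::M is not the same context as N;
/// it is accepted because N encloses N::M.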
static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
DeclContext *NewDC) {
OldDC = OldDC->getRedeclContext();
NewDC = NewDC->getRedeclContext();
if (OldDC->Equals(NewDC))
return true;
// In MSVC mode, we allow a redeclaration if the contexts are related (either
// encloses the other).
if (S.getLangOpts().MSVCCompat &&
(OldDC->Encloses(NewDC) || NewDC->Encloses(OldDC)))
return true;
return false;
}
/// This is invoked when we see 'struct foo' or 'struct {'. In the
/// former case, Name will be non-null. In the latter case, Name will be null.
/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
/// reference/declaration/definition of a tag.
///
/// \param IsTypeSpecifier \c true if this is a type-specifier (or
/// trailing-type-specifier) other than one in an alias-declaration.
///
/// \param SkipBody If non-null, will be set to indicate if the caller should
/// skip the definition of this tag and treat it as if it were a declaration.
Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attrs, AccessSpecifier AS,
SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists,
bool &OwnedDecl, bool &IsDependent,
SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
IdentifierInfo *OrigName = Name;
assert((Name != nullptr || TUK == TUK_Definition) &&
"Nameless record must be a definition!");
assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
OwnedDecl = false;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
bool ScopedEnum = ScopedEnumKWLoc.isValid();
// FIXME: Check member specializations more carefully.
bool isMemberSpecialization = false;
bool Invalid = false;
// We only need to do this matching if we have template parameters
// or a scope specifier, which also conveniently avoids this work
// for non-C++ cases.
if (TemplateParameterLists.size() > 0 ||
(SS.isNotEmpty() && TUK != TUK_Reference)) {
if (TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
TUK == TUK_Friend, isMemberSpecialization, Invalid)) {
if (Kind == TTK_Enum) {
Diag(KWLoc, diag::err_enum_template);
return nullptr;
}
if (TemplateParams->size() > 0) {
// This is a declaration or definition of a class template (which may
// be a member of another template).
if (Invalid)
return nullptr;
OwnedDecl = false;
DeclResult Result = CheckClassTemplate(
S, TagSpec, TUK, KWLoc, SS, Name, NameLoc, Attrs, TemplateParams,
AS, ModulePrivateLoc,
/*FriendLoc*/ SourceLocation(), TemplateParameterLists.size() - 1,
TemplateParameterLists.data(), SkipBody);
return Result.get();
} else {
// The "template<>" header is extraneous.
Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
<< TypeWithKeyword::getTagTypeKindName(Kind) << Name;
isMemberSpecialization = true;
}
}
}
// Figure out the underlying type if this is an enum declaration. We need to do
// this early, because it's needed to detect if this is an incompatible
// redeclaration.
llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
bool IsFixed = !UnderlyingType.isUnset() || ScopedEnum;
if (Kind == TTK_Enum) {
if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum)) {
// No underlying type explicitly specified, or we failed to parse the
// type, default to int.
EnumUnderlying = Context.IntTy.getTypePtr();
} else if (UnderlyingType.get()) {
// C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
// integral type; any cv-qualification is ignored.
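// For example (hypothetical): 'enum E : float { };' names a non-integral
// enum-base; the check below diagnoses it and we recover by falling back
// to int.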
TypeSourceInfo *TI = nullptr;
GetTypeFromParser(UnderlyingType.get(), &TI);
EnumUnderlying = TI;
if (CheckEnumUnderlyingType(TI))
// Recover by falling back to int.
EnumUnderlying = Context.IntTy.getTypePtr();
if (DiagnoseUnexpandedParameterPack(TI->getTypeLoc().getBeginLoc(), TI,
UPPC_FixedUnderlyingType))
EnumUnderlying = Context.IntTy.getTypePtr();
} else if (Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// For MSVC ABI compatibility, unfixed enums must use an underlying type
// of 'int'. However, if this is an unfixed forward declaration, don't set
// the underlying type unless the user enables -fms-compatibility. This
// makes unfixed forward declared enums incomplete and is more conforming.
if (TUK == TUK_Definition || getLangOpts().MSVCCompat)
EnumUnderlying = Context.IntTy.getTypePtr();
}
}
DeclContext *SearchDC = CurContext;
DeclContext *DC = CurContext;
bool isStdBadAlloc = false;
bool isStdAlignValT = false;
RedeclarationKind Redecl = forRedeclarationInCurContext();
if (TUK == TUK_Friend || TUK == TUK_Reference)
Redecl = NotForRedeclaration;
/// Create a new tag decl in C/ObjC. Since the ODR-like semantics implemented
/// for ObjC/C require structural equivalence checking, the returned decl
/// here is passed back to the parser, allowing the tag body to be parsed.
auto createTagFromNewDecl = [&]() -> TagDecl * {
assert(!getLangOpts().CPlusPlus && "not meant for C++ usage");
// If there is an identifier, use the location of the identifier as the
// location of the decl, otherwise use the location of the struct/union
// keyword.
SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
TagDecl *New = nullptr;
if (Kind == TTK_Enum) {
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, nullptr,
ScopedEnum, ScopedEnumUsesClassTag, IsFixed);
// If this is an undefined enum, bail.
if (TUK != TUK_Definition && !Invalid)
return nullptr;
if (EnumUnderlying) {
EnumDecl *ED = cast<EnumDecl>(New);
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo *>())
ED->setIntegerTypeSourceInfo(TI);
else
ED->setIntegerType(QualType(EnumUnderlying.get<const Type *>(), 0));
ED->setPromotionType(ED->getIntegerType());
}
} else { // struct/union
New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
nullptr);
}
if (RecordDecl *RD = dyn_cast<RecordDecl>(New)) {
// Add alignment attributes if necessary; these attributes are checked
// when the ASTContext lays out the structure.
//
// It is important for implementing the correct semantics that this
// happen here (in ActOnTag). The #pragma pack stack is
// maintained as a result of parser callbacks which can occur at
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
if (TUK == TUK_Definition) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
}
New->setLexicalDeclContext(CurContext);
return New;
};
LookupResult Previous(*this, Name, NameLoc, LookupTagName, Redecl);
if (Name && SS.isNotEmpty()) {
// We have a nested-name tag ('struct foo::bar').
// Check for invalid 'foo::'.
if (SS.isInvalid()) {
Name = nullptr;
goto CreateNewDecl;
}
// If this is a friend or a reference to a class in a dependent
// context, don't try to make a decl for it.
if (TUK == TUK_Friend || TUK == TUK_Reference) {
DC = computeDeclContext(SS, false);
if (!DC) {
IsDependent = true;
return nullptr;
}
} else {
DC = computeDeclContext(SS, true);
if (!DC) {
Diag(SS.getRange().getBegin(), diag::err_dependent_nested_name_spec)
<< SS.getRange();
return nullptr;
}
}
if (RequireCompleteDeclContext(SS, DC))
return nullptr;
SearchDC = DC;
// Look-up name inside 'foo::'.
LookupQualifiedName(Previous, DC);
if (Previous.isAmbiguous())
return nullptr;
if (Previous.empty()) {
// Name lookup did not find anything. However, if the
// nested-name-specifier refers to the current instantiation,
// and that current instantiation has any dependent base
// classes, we might find something at instantiation time: treat
// this as a dependent elaborated-type-specifier.
// But this only makes any sense for reference-like lookups.
if (Previous.wasNotFoundInCurrentInstantiation() &&
(TUK == TUK_Reference || TUK == TUK_Friend)) {
IsDependent = true;
return nullptr;
}
// A tag 'foo::bar' must already exist.
Diag(NameLoc, diag::err_not_tag_in_scope)
<< Kind << Name << DC << SS.getRange();
Name = nullptr;
Invalid = true;
goto CreateNewDecl;
}
} else if (Name) {
// C++14 [class.mem]p14:
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member of class T that is itself a type
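//
// For example (hypothetical):
//   struct T { enum T { }; }; // error: member type must not be named 'T'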
if (TUK != TUK_Reference && TUK != TUK_Friend &&
DiagnoseClassNameShadow(SearchDC, DeclarationNameInfo(Name, NameLoc)))
return nullptr;
// If this is a named struct, check to see if there was a previous forward
// declaration or definition.
// FIXME: We're looking into outer scopes here, even when we
// shouldn't be. Doing so can result in ambiguities that we
// shouldn't be diagnosing.
LookupName(Previous, S);
// When declaring or defining a tag, ignore ambiguities introduced
// by types using'ed into this scope.
if (Previous.isAmbiguous() &&
(TUK == TUK_Definition || TUK == TUK_Declaration)) {
LookupResult::Filter F = Previous.makeFilter();
while (F.hasNext()) {
NamedDecl *ND = F.next();
if (!ND->getDeclContext()->getRedeclContext()->Equals(
SearchDC->getRedeclContext()))
F.erase();
}
F.done();
}
// C++11 [namespace.memdef]p3:
// If the name in a friend declaration is neither qualified nor
// a template-id and the declaration is a function or an
// elaborated-type-specifier, the lookup to determine whether
// the entity has been previously declared shall not consider
// any scopes outside the innermost enclosing namespace.
//
// MSVC doesn't implement the above rule for types, so a friend tag
// declaration may be a redeclaration of a type declared in an enclosing
// scope. They do implement this rule for friend functions.
//
// Does it matter that this should be by scope instead of by
// semantic context?
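//
// For example (hypothetical):
//   struct S;
//   namespace N { struct C { friend struct S; }; }
// Here the friend declaration declares N::S rather than referring to ::S,
// except in MSVC mode, where lookup finds ::S and reuses it.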
if (!Previous.empty() && TUK == TUK_Friend) {
DeclContext *EnclosingNS = SearchDC->getEnclosingNamespaceContext();
LookupResult::Filter F = Previous.makeFilter();
bool FriendSawTagOutsideEnclosingNamespace = false;
while (F.hasNext()) {
NamedDecl *ND = F.next();
DeclContext *DC = ND->getDeclContext()->getRedeclContext();
if (DC->isFileContext() &&
!EnclosingNS->Encloses(ND->getDeclContext())) {
if (getLangOpts().MSVCCompat)
FriendSawTagOutsideEnclosingNamespace = true;
else
F.erase();
}
}
F.done();
// Diagnose this MSVC extension in the easy case where lookup would have
// unambiguously found something outside the enclosing namespace.
if (Previous.isSingleResult() && FriendSawTagOutsideEnclosingNamespace) {
NamedDecl *ND = Previous.getFoundDecl();
Diag(NameLoc, diag::ext_friend_tag_redecl_outside_namespace)
<< createFriendTagNNSFixIt(*this, ND, S, NameLoc);
}
}
// Note: there used to be some attempt at recovery here.
if (Previous.isAmbiguous())
return nullptr;
if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
// FIXME: This makes sure that we ignore the contexts associated
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
// in Sema::LookupName; is there a better way to deal with this?
while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
SearchDC = SearchDC->getParent();
}
}
if (Previous.isSingleResult() &&
Previous.getFoundDecl()->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(NameLoc, Previous.getFoundDecl());
// Just pretend that we didn't see the previous declaration.
Previous.clear();
}
if (getLangOpts().CPlusPlus && Name && DC && StdNamespace &&
DC->Equals(getStdNamespace())) {
if (Name->isStr("bad_alloc")) {
// This is a declaration of or a reference to "std::bad_alloc".
isStdBadAlloc = true;
// If std::bad_alloc has been implicitly declared (but made invisible to
// name lookup), fill in this implicit declaration as the previous
// declaration, so that the declarations get chained appropriately.
if (Previous.empty() && StdBadAlloc)
Previous.addDecl(getStdBadAlloc());
} else if (Name->isStr("align_val_t")) {
isStdAlignValT = true;
if (Previous.empty() && StdAlignValT)
Previous.addDecl(getStdAlignValT());
}
}
// If we didn't find a previous declaration, and this is a reference
// (or friend reference), move to the correct scope. In C++, we
// also need to do a redeclaration lookup there, just in case
// there's a shadow friend decl.
if (Name && Previous.empty() &&
(TUK == TUK_Reference || TUK == TUK_Friend || IsTemplateParamOrArg)) {
if (Invalid) goto CreateNewDecl;
assert(SS.isEmpty());
if (TUK == TUK_Reference || IsTemplateParamOrArg) {
// C++ [basic.scope.pdecl]p5:
// -- for an elaborated-type-specifier of the form
//
// class-key identifier
//
// if the elaborated-type-specifier is used in the
// decl-specifier-seq or parameter-declaration-clause of a
// function defined in namespace scope, the identifier is
// declared as a class-name in the namespace that contains
// the declaration; otherwise, except as a friend
// declaration, the identifier is declared in the smallest
// non-class, non-function-prototype scope that contains the
// declaration.
//
// C99 6.7.2.3p8 has a similar (but not identical!) provision for
// C structs and unions.
//
// It is an error in C++ to declare (rather than define) an enum
// type, including via an elaborated type specifier. We'll
// diagnose that later; for now, declare the enum in the same
// scope as we would have picked for any other tag type.
//
// GNU C also supports this behavior as part of its incomplete
// enum types extension, while GNU C++ does not.
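//
// For example (hypothetical): given 'void f(struct S *p);' at file scope,
// C++ declares 'S' in the enclosing namespace, while C gives it a scope
// that ends with the prototype (hence the parameter-list warning issued
// further below).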
//
// Find the context where we'll be declaring the tag.
// FIXME: We would like to maintain the current DeclContext as the
// lexical context.
SearchDC = getTagInjectionContext(SearchDC);
// Find the scope where we'll be declaring the tag.
S = getTagInjectionScope(S, getLangOpts());
} else {
assert(TUK == TUK_Friend);
// C++ [namespace.memdef]p3:
// If a friend declaration in a non-local class first declares a
// class or function, the friend class or function is a member of
// the innermost enclosing namespace.
SearchDC = SearchDC->getEnclosingNamespaceContext();
}
// In C++, we need to do a redeclaration lookup to properly
// diagnose some problems.
// FIXME: redeclaration lookup is also used (with and without C++) to find a
// hidden declaration so that we don't get ambiguity errors when using a
// type declared by an elaborated-type-specifier. In C that is not correct
// and we should instead merge compatible types found by lookup.
if (getLangOpts().CPlusPlus) {
Previous.setRedeclarationKind(forRedeclarationInCurContext());
LookupQualifiedName(Previous, SearchDC);
} else {
Previous.setRedeclarationKind(forRedeclarationInCurContext());
LookupName(Previous, S);
}
}
// If we have a known previous declaration to use, then use it.
if (Previous.empty() && SkipBody && SkipBody->Previous)
Previous.addDecl(SkipBody->Previous);
if (!Previous.empty()) {
NamedDecl *PrevDecl = Previous.getFoundDecl();
NamedDecl *DirectPrevDecl = Previous.getRepresentativeDecl();
// It's okay to have a tag decl in the same scope as a typedef
// which hides a tag decl in the same scope. Finding this
// insanity with a redeclaration lookup can only actually happen
// in C++.
//
// This is also okay for elaborated-type-specifiers, which is
// technically forbidden by the current standard but which is
// okay according to the likely resolution of an open issue;
// see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407
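//
// For example (hypothetical):
//   struct S { };
//   typedef struct S S; // the typedef hides the tag in the same scope
//   struct S *p;        // the elaborated-type-specifier still finds the tag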
if (getLangOpts().CPlusPlus) {
if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) {
if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
TagDecl *Tag = TT->getDecl();
if (Tag->getDeclName() == Name &&
Tag->getDeclContext()->getRedeclContext()
->Equals(TD->getDeclContext()->getRedeclContext())) {
PrevDecl = Tag;
Previous.clear();
Previous.addDecl(Tag);
Previous.resolveKind();
}
}
}
}
// If this is a redeclaration of a using shadow declaration, it must
// declare a tag in the same context. In MSVC mode, we allow a
// redefinition if either context is within the other.
if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) {
auto *OldTag = dyn_cast<TagDecl>(PrevDecl);
if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend &&
isDeclInScope(Shadow, SearchDC, S, isMemberSpecialization) &&
!(OldTag && isAcceptableTagRedeclContext(
*this, OldTag->getDeclContext(), SearchDC))) {
Diag(KWLoc, diag::err_using_decl_conflict_reverse);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl)
<< 0;
// Recover by ignoring the old declaration.
Previous.clear();
goto CreateNewDecl;
}
}
if (TagDecl *PrevTagDecl = dyn_cast<TagDecl>(PrevDecl)) {
// If this is a use of a previous tag, or if the tag is already declared
// in the same scope (so that the definition/declaration completes or
// re-mentions the tag), reuse the decl.
if (TUK == TUK_Reference || TUK == TUK_Friend ||
isDeclInScope(DirectPrevDecl, SearchDC, S,
SS.isNotEmpty() || isMemberSpecialization)) {
// Make sure that this wasn't declared as an enum and now used as a
// struct or something similar.
if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
TUK == TUK_Definition, KWLoc,
Name)) {
bool SafeToContinue
= (PrevTagDecl->getTagKind() != TTK_Enum &&
Kind != TTK_Enum);
if (SafeToContinue)
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< Name
<< FixItHint::CreateReplacement(SourceRange(KWLoc),
PrevTagDecl->getKindName());
else
Diag(KWLoc, diag::err_use_with_wrong_tag) << Name;
Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
if (SafeToContinue)
Kind = PrevTagDecl->getTagKind();
else {
// Recover by making this an anonymous redefinition.
Name = nullptr;
Previous.clear();
Invalid = true;
}
}
if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
// If this is an elaborated-type-specifier for a scoped enumeration,
// the 'class' keyword is not necessary and not permitted.
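//
// For example (hypothetical):
//   enum class E : int;
//   enum E e1;       // OK: plain 'enum' may refer to a scoped enumeration
//   enum class E e2; // error: 'class' is not permitted in a mere reference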
if (TUK == TUK_Reference || TUK == TUK_Friend) {
if (ScopedEnum)
Diag(ScopedEnumKWLoc, diag::err_enum_class_reference)
<< PrevEnum->isScoped()
<< FixItHint::CreateRemoval(ScopedEnumKWLoc);
return PrevTagDecl;
}
QualType EnumUnderlyingTy;
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
EnumUnderlyingTy = TI->getType().getUnqualifiedType();
else if (const Type *T = EnumUnderlying.dyn_cast<const Type*>())
EnumUnderlyingTy = QualType(T, 0);
// All conflicts with previous declarations are recovered by
// returning the previous declaration, unless this is a definition,
// in which case we want the caller to bail out.
if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
ScopedEnum, EnumUnderlyingTy,
IsFixed, PrevEnum))
return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
}
// C++11 [class.mem]p1:
// A member shall not be declared twice in the member-specification,
// except that a nested class or member class template can be declared
// and then later defined.
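//
// For example (hypothetical):
//   struct S {
//     struct Inner;
//     struct Inner;     // warned: member declared twice
//     struct Inner { }; // OK: declaration later completed by a definition
//   };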
if (TUK == TUK_Declaration && PrevDecl->isCXXClassMember() &&
S->isDeclScope(PrevDecl)) {
Diag(NameLoc, diag::ext_member_redeclared);
Diag(PrevTagDecl->getLocation(), diag::note_previous_declaration);
}
if (!Invalid) {
// If this is a use, just return the declaration we found, unless
// we have attributes.
if (TUK == TUK_Reference || TUK == TUK_Friend) {
if (!Attrs.empty()) {
// FIXME: Diagnose these attributes. For now, we create a new
// declaration to hold them.
} else if (TUK == TUK_Reference &&
(PrevTagDecl->getFriendObjectKind() ==
Decl::FOK_Undeclared ||
PrevDecl->getOwningModule() != getCurrentModule()) &&
SS.isEmpty()) {
// This declaration is a reference to an existing entity, but
// has different visibility from that entity: it either makes
// a friend visible or it makes a type visible in a new module.
// In either case, create a new declaration. We only do this if
// the declaration would have meant the same thing if no prior
// declaration were found, that is, if it was found in the same
// scope where we would have injected a declaration.
if (!getTagInjectionContext(CurContext)->getRedeclContext()
->Equals(PrevDecl->getDeclContext()->getRedeclContext()))
return PrevTagDecl;
// This is in the injected scope, create a new declaration in
// that scope.
S = getTagInjectionScope(S, getLangOpts());
} else {
return PrevTagDecl;
}
}
// Diagnose attempts to redefine a tag.
if (TUK == TUK_Definition) {
if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
// If we're defining a specialization and the previous definition
// is from an implicit instantiation, don't emit an error
// here; we'll catch this in the general case below.
bool IsExplicitSpecializationAfterInstantiation = false;
if (isMemberSpecialization) {
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
IsExplicitSpecializationAfterInstantiation =
RD->getTemplateSpecializationKind() !=
TSK_ExplicitSpecialization;
else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
IsExplicitSpecializationAfterInstantiation =
ED->getTemplateSpecializationKind() !=
TSK_ExplicitSpecialization;
}
// Note that clang allows ODR-like semantics for ObjC/C, i.e., do
// not keep more than one definition around (merge them). However,
// ensure the decl passes the structural compatibility check in
// C11 6.2.7/1 (or 6.1.2.6/1 in C89).
NamedDecl *Hidden = nullptr;
if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
// There is a definition of this tag, but it is not visible. We
// explicitly make use of C++'s one definition rule here, and
// assume that this definition is identical to the hidden one
// we already have. Make the existing definition visible and
// use it in place of this one.
if (!getLangOpts().CPlusPlus) {
// Postpone making the old definition visible until after we
// complete parsing the new one and do the structural
// comparison.
SkipBody->CheckSameAsPrevious = true;
SkipBody->New = createTagFromNewDecl();
SkipBody->Previous = Hidden;
} else {
SkipBody->ShouldSkip = true;
makeMergedDefinitionVisible(Hidden);
}
return Def;
} else if (!IsExplicitSpecializationAfterInstantiation) {
// A redeclaration in function prototype scope in C isn't
// visible elsewhere, so merely issue a warning.
if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope())
Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name;
else
Diag(NameLoc, diag::err_redefinition) << Name;
notePreviousDefinition(Def,
NameLoc.isValid() ? NameLoc : KWLoc);
// If this is a redefinition, recover by making this
// struct be anonymous, which will make any later
// references get the previous definition.
Name = nullptr;
Previous.clear();
Invalid = true;
}
} else {
// If the type is currently being defined, complain
// about a nested redefinition.
auto *TD = Context.getTagDeclType(PrevTagDecl)->getAsTagDecl();
if (TD->isBeingDefined()) {
Diag(NameLoc, diag::err_nested_redefinition) << Name;
Diag(PrevTagDecl->getLocation(),
diag::note_previous_definition);
Name = nullptr;
Previous.clear();
Invalid = true;
}
}
// Okay, this is a definition of a previously declared or referenced
// tag. We're going to create a new Decl for it.
}
// Okay, we're going to make a redeclaration. If this is some kind
// of reference, make sure we build the redeclaration in the same DC
// as the original, and ignore the current access specifier.
if (TUK == TUK_Friend || TUK == TUK_Reference) {
SearchDC = PrevTagDecl->getDeclContext();
AS = AS_none;
}
}
// If we get here we have (another) forward declaration or we
// have a definition. Just create a new decl.
} else {
// If we get here, this is a definition of a new tag type in a nested
// scope, e.g. "struct foo; void bar() { struct foo; }", just create a
// new decl/type. We set PrevDecl to NULL so that the entities
// have distinct types.
Previous.clear();
}
// If we get here, we're going to create a new Decl. If PrevDecl
// is non-NULL, it's a definition of the tag declared by
// PrevDecl. If it's NULL, we have a new definition.
// Otherwise, PrevDecl is not a tag, but was found with tag
// lookup. This is only actually possible in C++, where a few
// things like templates still live in the tag namespace.
} else {
// Use a better diagnostic if an elaborated-type-specifier
// found the wrong kind of type on the first
// (non-redeclaration) lookup.
if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
!Previous.isForRedeclaration()) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_non_tag) << PrevDecl << NTK
<< Kind;
Diag(PrevDecl->getLocation(), diag::note_declared_at);
Invalid = true;
// Otherwise, only diagnose if the declaration is in scope.
} else if (!isDeclInScope(DirectPrevDecl, SearchDC, S,
SS.isNotEmpty() || isMemberSpecialization)) {
// do nothing
// Diagnose implicit declarations introduced by elaborated types.
} else if (TUK == TUK_Reference || TUK == TUK_Friend) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_conflict) << NTK;
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
Invalid = true;
// Otherwise it's a declaration. Call out a particularly common
// case here.
} else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(PrevDecl)) {
unsigned Kind = 0;
if (isa<TypeAliasDecl>(PrevDecl)) Kind = 1;
Diag(NameLoc, diag::err_tag_definition_of_typedef)
<< Name << Kind << TND->getUnderlyingType();
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
Invalid = true;
// Otherwise, diagnose.
} else {
// The tag name clashes with something else in the target scope,
// issue an error and recover by making this tag be anonymous.
Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
notePreviousDefinition(PrevDecl, NameLoc);
Name = nullptr;
Invalid = true;
}
// The existing declaration isn't relevant to us; we're in a
// new scope, so clear out the previous declaration.
Previous.clear();
}
}
CreateNewDecl:
TagDecl *PrevDecl = nullptr;
if (Previous.isSingleResult())
PrevDecl = cast<TagDecl>(Previous.getFoundDecl());
// If there is an identifier, use the location of the identifier as the
// location of the decl, otherwise use the location of the struct/union
// keyword.
SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
// Otherwise, create a new declaration. If there is a previous
// declaration of the same entity, the two will be linked via
// PrevDecl.
TagDecl *New;
if (Kind == TTK_Enum) {
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// enum X { A, B, C } D; D should chain to X.
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name,
cast_or_null<EnumDecl>(PrevDecl), ScopedEnum,
ScopedEnumUsesClassTag, IsFixed);
if (isStdAlignValT && (!StdAlignValT || getStdAlignValT()->isImplicit()))
StdAlignValT = cast<EnumDecl>(New);
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
if (IsFixed && (getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
}
else if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) {
Diag(Loc, diag::ext_forward_ref_enum_def)
<< New;
Diag(Def->getLocation(), diag::note_previous_definition);
} else {
unsigned DiagID = diag::ext_forward_ref_enum;
if (getLangOpts().MSVCCompat)
DiagID = diag::ext_ms_forward_ref_enum;
else if (getLangOpts().CPlusPlus)
DiagID = diag::err_forward_ref_enum;
Diag(Loc, DiagID);
}
}
if (EnumUnderlying) {
EnumDecl *ED = cast<EnumDecl>(New);
if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
ED->setIntegerTypeSourceInfo(TI);
else
ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0));
ED->setPromotionType(ED->getIntegerType());
assert(ED->isComplete() && "enum with type should be complete");
}
} else {
// struct/union/class
// FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
// struct X { int A; } D; D should chain to X.
if (getLangOpts().CPlusPlus) {
// FIXME: Look for a way to use RecordDecl for simple structs.
New = CXXRecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
cast_or_null<CXXRecordDecl>(PrevDecl));
if (isStdBadAlloc && (!StdBadAlloc || getStdBadAlloc()->isImplicit()))
StdBadAlloc = cast<CXXRecordDecl>(New);
} else
New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
cast_or_null<RecordDecl>(PrevDecl));
}
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration [...].
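//
// For example (hypothetical): 'new struct S { int x; };' tries to define
// 'S' inside a type-specifier and is rejected here.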
if (getLangOpts().CPlusPlus && (IsTypeSpecifier || IsTemplateParamOrArg) &&
TUK == TUK_Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
<< Context.getTagDeclType(New);
Invalid = true;
}
if (!Invalid && getLangOpts().CPlusPlus && TUK == TUK_Definition &&
DC->getDeclKind() == Decl::Enum) {
Diag(New->getLocation(), diag::err_type_defined_in_enum)
<< Context.getTagDeclType(New);
Invalid = true;
}
// Maybe add qualifier info.
if (SS.isNotEmpty()) {
if (SS.isSet()) {
// If this is either a declaration or a definition, check the
// nested-name-specifier against the current context.
if ((TUK == TUK_Definition || TUK == TUK_Declaration) &&
diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc,
isMemberSpecialization))
Invalid = true;
New->setQualifierInfo(SS.getWithLocInContext(Context));
if (TemplateParameterLists.size() > 0) {
New->setTemplateParameterListsInfo(Context, TemplateParameterLists);
}
}
else
Invalid = true;
}
if (RecordDecl *RD = dyn_cast<RecordDecl>(New)) {
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
//
// It is important for implementing the correct semantics that this
// happen here (in ActOnTag). The #pragma pack stack is
// maintained as a result of parser callbacks which can occur at
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
if (TUK == TUK_Definition) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
}
if (ModulePrivateLoc.isValid()) {
if (isMemberSpecialization)
Diag(New->getLocation(), diag::err_module_private_specialization)
<< 2
<< FixItHint::CreateRemoval(ModulePrivateLoc);
// __module_private__ does not apply to local classes. However, we only
// diagnose this as an error when the declaration specifiers are
// freestanding. Here, we just ignore the __module_private__.
else if (!SearchDC->isFunctionOrMethod())
New->setModulePrivate();
}
// If this is a specialization of a member class (of a class template),
// check the specialization.
if (isMemberSpecialization && CheckMemberSpecialization(New, Previous))
Invalid = true;
// If we're declaring or defining a tag in function prototype scope in C,
// note that this type can only be used within the function and add it to
// the list of decls to inject into the function definition scope.
if ((Name || Kind == TTK_Enum) &&
getNonFieldDeclScope(S)->isFunctionPrototypeScope()) {
if (getLangOpts().CPlusPlus) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
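//
// For example (hypothetical): 'void g(struct S { int x; } s);' is an
// error in C++, whereas C merely warns below that 'S' is not visible
// outside of 'g'.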
if (TUK == TUK_Definition && !IsTypeSpecifier) {
Diag(Loc, diag::err_type_defined_in_param_type)
<< Name;
Invalid = true;
}
} else if (!PrevDecl) {
Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
}
}
if (Invalid)
New->setInvalidDecl();
// Set the lexical context. If the tag has a C++ scope specifier, the
// lexical context will be different from the semantic context.
New->setLexicalDeclContext(CurContext);
// Mark this as a friend decl if applicable.
// In Microsoft mode, a friend declaration also acts as a forward
// declaration so we always pass true to setObjectOfFriendDecl to make
// the tag name visible.
if (TUK == TUK_Friend)
New->setObjectOfFriendDecl(getLangOpts().MSVCCompat);
// Set the access specifier.
if (!Invalid && SearchDC->isRecord())
SetMemberAccessSpecifier(New, PrevDecl, AS);
if (PrevDecl)
CheckRedeclarationModuleOwnership(New, PrevDecl);
if (TUK == TUK_Definition)
New->startDefinition();
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
if (TUK == TUK_Friend) {
// We might be replacing an existing declaration in the lookup tables;
// if so, borrow its access specifier.
if (PrevDecl)
New->setAccess(PrevDecl->getAccess());
DeclContext *DC = New->getDeclContext()->getRedeclContext();
DC->makeDeclVisibleInContext(New);
if (Name) // can be null along some error paths
if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false);
} else if (Name) {
S = getNonFieldDeclScope(S);
PushOnScopeChains(New, S, true);
} else {
CurContext->addDecl(New);
}
// If this is the C FILE type, notify the AST context.
if (IdentifierInfo *II = New->getIdentifier())
if (!New->isInvalidDecl() &&
New->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
II->isStr("FILE"))
Context.setFILEDecl(New);
if (PrevDecl)
mergeDeclAttributes(New, PrevDecl);
// If there's a #pragma GCC visibility in scope, set the visibility of this
// record.
AddPushedVisibilityAttribute(New);
if (isMemberSpecialization && !New->isInvalidDecl())
CompleteMemberSpecialization(New, Previous);
OwnedDecl = true;
// In C++, don't return an invalid declaration. We can't recover well from
// the cases where we make the type anonymous.
if (Invalid && getLangOpts().CPlusPlus) {
if (New->isBeingDefined())
if (auto RD = dyn_cast<RecordDecl>(New))
RD->completeDefinition();
return nullptr;
} else {
return New;
}
}
void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
AdjustDeclIfTemplate(TagD);
TagDecl *Tag = cast<TagDecl>(TagD);
// Enter the tag context.
PushDeclContext(S, Tag);
ActOnDocumentableDecl(TagD);
// If there's a #pragma GCC visibility in scope, set the visibility of this
// record.
AddPushedVisibilityAttribute(Tag);
}
bool Sema::ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody) {
if (!hasStructuralCompatLayout(Prev, SkipBody.New))
return false;
// Make the previous decl visible.
makeMergedDefinitionVisible(SkipBody.Previous);
return true;
}
Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
assert(isa<ObjCContainerDecl>(IDecl) &&
"ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
DeclContext *OCD = cast<DeclContext>(IDecl);
assert(getContainingDC(OCD) == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
CurContext = OCD;
return IDecl;
}
void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc) {
AdjustDeclIfTemplate(TagD);
CXXRecordDecl *Record = cast<CXXRecordDecl>(TagD);
FieldCollector->StartClass();
if (!Record->getIdentifier())
return;
if (FinalLoc.isValid())
Record->addAttr(new (Context)
FinalAttr(FinalLoc, Context, IsFinalSpelledSealed));
// C++ [class]p2:
// [...] The class-name is also inserted into the scope of the
// class itself; this is known as the injected-class-name. For
// purposes of access checking, the injected-class-name is treated
// as if it were a public member name.
CXXRecordDecl *InjectedClassName
= CXXRecordDecl::Create(Context, Record->getTagKind(), CurContext,
Record->getLocStart(), Record->getLocation(),
Record->getIdentifier(),
/*PrevDecl=*/nullptr,
/*DelayTypeCreation=*/true);
Context.getTypeDeclType(InjectedClassName, Record);
InjectedClassName->setImplicit();
InjectedClassName->setAccess(AS_public);
if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
InjectedClassName->setDescribedClassTemplate(Template);
PushOnScopeChains(InjectedClassName, S);
assert(InjectedClassName->isInjectedClassName() &&
"Broken injected-class-name");
}
void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
SourceRange BraceRange) {
AdjustDeclIfTemplate(TagD);
TagDecl *Tag = cast<TagDecl>(TagD);
Tag->setBraceRange(BraceRange);
// Make sure we "complete" the definition even it is invalid.
if (Tag->isBeingDefined()) {
assert(Tag->isInvalidDecl() && "We should already have completed it");
if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
RD->completeDefinition();
}
if (isa<CXXRecordDecl>(Tag)) {
FieldCollector->FinishClass();
}
// Exit the scope of this tag's definition.
PopDeclContext();
if (getCurLexicalContext()->isObjCContainer() &&
Tag->getDeclContext()->isFileContext())
Tag->setTopLevelDeclInObjCContainer();
// Notify the consumer that we've defined a tag.
if (!Tag->isInvalidDecl())
Consumer.HandleTagDeclDefinition(Tag);
}
void Sema::ActOnObjCContainerFinishDefinition() {
// Exit the scope of this interface definition.
PopDeclContext();
}
void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
assert(DC == CurContext && "Mismatch of container contexts");
OriginalLexicalContext = DC;
ActOnObjCContainerFinishDefinition();
}
void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
ActOnObjCContainerStartDefinition(cast<Decl>(DC));
OriginalLexicalContext = nullptr;
}
void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
AdjustDeclIfTemplate(TagD);
TagDecl *Tag = cast<TagDecl>(TagD);
Tag->setInvalidDecl();
// Make sure we "complete" the definition even it is invalid.
if (Tag->isBeingDefined()) {
if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
RD->completeDefinition();
}
// We're undoing ActOnTagStartDefinition here, not
// ActOnStartCXXMemberDeclarations, so we don't have to mess with
// the FieldCollector.
PopDeclContext();
}
// Note that FieldName may be null for anonymous bitfields.
ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth) {
// Default to true; that shouldn't confuse checks for emptiness
if (ZeroWidth)
*ZeroWidth = true;
// C99 6.7.2.1p4 - verify the field type.
// C++ 9.6p3: A bit-field shall have integral or enumeration type.
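//
// For example (hypothetical):
//   struct S {
//     float f : 3; // error: bit-field has non-integral type
//     int a : -1;  // error: negative width (checked further below)
//     int b : 0;   // error: named bit-field with zero width
//   };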
if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
// Handle incomplete types with specific error.
if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
return ExprError();
if (FieldName)
return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
<< FieldName << FieldTy << BitWidth->getSourceRange();
return Diag(FieldLoc, diag::err_not_integral_type_anon_bitfield)
<< FieldTy << BitWidth->getSourceRange();
} else if (DiagnoseUnexpandedParameterPack(const_cast<Expr *>(BitWidth),
UPPC_BitFieldWidth))
return ExprError();
// If the bit-width is type- or value-dependent, don't try to check
// it now.
if (BitWidth->isValueDependent() || BitWidth->isTypeDependent())
return BitWidth;
llvm::APSInt Value;
ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value);
if (ICE.isInvalid())
return ICE;
BitWidth = ICE.get();
if (Value != 0 && ZeroWidth)
*ZeroWidth = false;
// Zero-width bitfield is ok for anonymous field.
if (Value == 0 && FieldName)
return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName;
if (Value.isSigned() && Value.isNegative()) {
if (FieldName)
return Diag(FieldLoc, diag::err_bitfield_has_negative_width)
<< FieldName << Value.toString(10);
return Diag(FieldLoc, diag::err_anon_bitfield_has_negative_width)
<< Value.toString(10);
}
if (!FieldTy->isDependentType()) {
uint64_t TypeStorageSize = Context.getTypeSize(FieldTy);
uint64_t TypeWidth = Context.getIntWidth(FieldTy);
bool BitfieldIsOverwide = Value.ugt(TypeWidth);
// Over-wide bitfields are an error in C or when using the MSVC bitfield
// ABI.
bool CStdConstraintViolation =
BitfieldIsOverwide && !getLangOpts().CPlusPlus;
bool MSBitfieldViolation =
Value.ugt(TypeStorageSize) &&
(IsMsStruct || Context.getTargetInfo().getCXXABI().isMicrosoft());
if (CStdConstraintViolation || MSBitfieldViolation) {
unsigned DiagWidth =
CStdConstraintViolation ? TypeWidth : TypeStorageSize;
if (FieldName)
return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_width)
<< FieldName << (unsigned)Value.getZExtValue()
<< !CStdConstraintViolation << DiagWidth;
return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_width)
<< (unsigned)Value.getZExtValue() << !CStdConstraintViolation
<< DiagWidth;
}
// Warn on types where the user might conceivably expect to get all
// specified bits as value bits: that's all integral types other than
// 'bool'.
if (BitfieldIsOverwide && !FieldTy->isBooleanType()) {
if (FieldName)
Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width)
<< FieldName << (unsigned)Value.getZExtValue()
<< (unsigned)TypeWidth;
else
Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_width)
<< (unsigned)Value.getZExtValue() << (unsigned)TypeWidth;
}
}
return BitWidth;
}
/// ActOnField - Each field of a C struct/union is passed into this in order
/// to create a FieldDecl object for it.
Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth) {
FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD),
DeclStart, D, static_cast<Expr*>(BitfieldWidth),
/*InitStyle=*/ICIS_NoInit, AS_public);
return Res;
}
/// HandleField - Analyze a field of a C struct or a C++ data member.
///
FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
SourceLocation DeclStart,
Declarator &D, Expr *BitWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS) {
if (D.isDecompositionDeclarator()) {
const DecompositionDeclarator &Decomp = D.getDecompositionDeclarator();
Diag(Decomp.getLSquareLoc(), diag::err_decomp_decl_context)
<< Decomp.getSourceRange();
return nullptr;
}
IdentifierInfo *II = D.getIdentifier();
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
if (getLangOpts().CPlusPlus) {
CheckExtraCXXDefaultArguments(D);
if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
UPPC_DataMemberType)) {
D.setInvalidType();
T = Context.IntTy;
TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
}
}
// TR 18037 does not allow fields to be declared with address spaces.
if (T.getQualifiers().hasAddressSpace() ||
T->isDependentAddressSpaceType() ||
T->getBaseElementTypeUnsafe()->isDependentAddressSpaceType()) {
Diag(Loc, diag::err_field_with_address_space);
D.setInvalidType();
}
// OpenCL v1.2 s6.9b,r & OpenCL v2.0 s6.12.5 - The following types cannot be
// used as a structure or union field: image, sampler, event, or block types.
if (LangOpts.OpenCL && (T->isEventT() || T->isImageType() ||
T->isSamplerT() || T->isBlockPointerType())) {
Diag(Loc, diag::err_opencl_type_struct_or_union_field) << T;
D.setInvalidType();
}
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (D.getDeclSpec().isInlineSpecified())
Diag(D.getDeclSpec().getInlineSpecLoc(), diag::err_inline_non_function)
<< getLangOpts().CPlusPlus17;
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec())
Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
diag::err_invalid_thread)
<< DeclSpec::getSpecifierName(TSCS);
// Check to see if this name was declared as a member previously
NamedDecl *PrevDecl = nullptr;
LookupResult Previous(*this, II, Loc, LookupMemberName,
ForVisibleRedeclaration);
LookupName(Previous, S);
switch (Previous.getResultKind()) {
case LookupResult::Found:
case LookupResult::FoundUnresolvedValue:
PrevDecl = Previous.getAsSingle<NamedDecl>();
break;
case LookupResult::FoundOverloaded:
PrevDecl = Previous.getRepresentativeDecl();
break;
case LookupResult::NotFound:
case LookupResult::NotFoundInCurrentInstantiation:
case LookupResult::Ambiguous:
break;
}
Previous.suppressDiagnostics();
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
}
if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
PrevDecl = nullptr;
bool Mutable
= (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
SourceLocation TSSL = D.getLocStart();
FieldDecl *NewFD
= CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, InitStyle,
TSSL, AS, PrevDecl, &D);
if (NewFD->isInvalidDecl())
Record->setInvalidDecl();
if (D.getDeclSpec().isModulePrivateSpecified())
NewFD->setModulePrivate();
if (NewFD->isInvalidDecl() && PrevDecl) {
// Don't introduce NewFD into scope; there's already something
// with the same name in the same scope.
} else if (II) {
PushOnScopeChains(NewFD, S);
} else
Record->addDecl(NewFD);
return NewFD;
}
/// Build a new FieldDecl and check its well-formedness.
///
/// This routine builds a new FieldDecl given the field's name, type,
/// record, etc. \p PrevDecl should refer to any previous declaration
/// with the same name and in the same scope as the field to be
/// created.
///
/// \returns a new FieldDecl.
///
/// \todo The Declarator argument is a hack. It will be removed once
FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D) {
IdentifierInfo *II = Name.getAsIdentifierInfo();
bool InvalidDecl = false;
if (D) InvalidDecl = D->isInvalidType();
// If we receive a broken type, recover by assuming 'int' and
// marking this declaration as invalid.
if (T.isNull()) {
InvalidDecl = true;
T = Context.IntTy;
}
QualType EltTy = Context.getBaseElementType(T);
if (!EltTy->isDependentType()) {
if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
// Fields of incomplete type force their record to be invalid.
Record->setInvalidDecl();
InvalidDecl = true;
} else {
NamedDecl *Def;
EltTy->isIncompleteType(&Def);
if (Def && Def->isInvalidDecl()) {
Record->setInvalidDecl();
InvalidDecl = true;
}
}
}
// OpenCL v1.2 s6.9.c: bitfields are not supported.
if (BitWidth && getLangOpts().OpenCL) {
Diag(Loc, diag::err_opencl_bitfields);
InvalidDecl = true;
}
// Anonymous bit-fields cannot be cv-qualified (CWG 2229).
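// For example (hypothetical): 'struct S { const int : 4; };' is rejected.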
if (!InvalidDecl && getLangOpts().CPlusPlus && !II && BitWidth &&
T.hasQualifiers()) {
InvalidDecl = true;
Diag(Loc, diag::err_anon_bitfield_qualifiers);
}
// C99 6.7.2.1p8: A member of a structure or union may have any type other
// than a variably modified type.
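//
// For example (hypothetical): in 'void f(int n) { struct S { int a[n]; }; }'
// the member 'a' has a variably modified type, which is diagnosed below.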
if (!InvalidDecl && T->isVariablyModifiedType()) {
bool SizeIsNegative;
llvm::APSInt Oversized;
TypeSourceInfo *FixedTInfo =
TryToFixInvalidVariablyModifiedTypeSourceInfo(TInfo, Context,
SizeIsNegative,
Oversized);
if (FixedTInfo) {
Diag(Loc, diag::warn_illegal_constant_array_size);
TInfo = FixedTInfo;
T = FixedTInfo->getType();
} else {
if (SizeIsNegative)
Diag(Loc, diag::err_typecheck_negative_array_size);
else if (Oversized.getBoolValue())
Diag(Loc, diag::err_array_too_large)
<< Oversized.toString(10);
else
Diag(Loc, diag::err_typecheck_field_variable_size);
InvalidDecl = true;
}
}
// Fields cannot have abstract class types
if (!InvalidDecl && RequireNonAbstractType(Loc, T,
diag::err_abstract_type_in_decl,
AbstractFieldType))
InvalidDecl = true;
bool ZeroWidth = false;
if (InvalidDecl)
BitWidth = nullptr;
// If this is declared as a bit-field, check the bit-field.
if (BitWidth) {
BitWidth = VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth,
&ZeroWidth).get();
if (!BitWidth) {
InvalidDecl = true;
BitWidth = nullptr;
ZeroWidth = false;
}
}
// Check that 'mutable' is consistent with the type of the declaration.
if (!InvalidDecl && Mutable) {
unsigned DiagID = 0;
if (T->isReferenceType())
DiagID = getLangOpts().MSVCCompat ? diag::ext_mutable_reference
: diag::err_mutable_reference;
else if (T.isConstQualified())
DiagID = diag::err_mutable_const;
if (DiagID) {
SourceLocation ErrLoc = Loc;
if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid())
ErrLoc = D->getDeclSpec().getStorageClassSpecLoc();
Diag(ErrLoc, DiagID);
if (DiagID != diag::ext_mutable_reference) {
Mutable = false;
InvalidDecl = true;
}
}
}
// C++11 [class.union]p8 (DR1460):
// At most one variant member of a union may have a
// brace-or-equal-initializer.
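//
// For example (hypothetical):
//   union U { int a = 0; int b = 1; }; // error: two default member
//                                      // initializers in one union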
if (InitStyle != ICIS_NoInit)
checkDuplicateDefaultInit(*this, cast<CXXRecordDecl>(Record), Loc);
FieldDecl *NewFD = FieldDecl::Create(Context, Record, TSSL, Loc, II, T, TInfo,
BitWidth, Mutable, InitStyle);
if (InvalidDecl)
NewFD->setInvalidDecl();
if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
Diag(Loc, diag::err_duplicate_member) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
}
if (!InvalidDecl && getLangOpts().CPlusPlus) {
if (Record->isUnion()) {
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
if (RDecl->getDefinition()) {
// C++ [class.union]p1: An object of a class with a non-trivial
// constructor, a non-trivial copy constructor, a non-trivial
// destructor, or a non-trivial copy assignment operator
// cannot be a member of a union, nor can an array of such
// objects.
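//
// For example (hypothetical): 'union U { std::string s; };' is an
// error in C++98 and a -Wc++98-compat warning in C++11 and later.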
if (CheckNontrivialField(NewFD))
NewFD->setInvalidDecl();
}
}
// C++ [class.union]p1: If a union contains a member of reference type,
// the program is ill-formed, except when compiling with MSVC extensions
// enabled.
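//
// For example (hypothetical): 'union U { int &r; };' is ill-formed, but
// accepted as an extension when MSVC extensions are enabled.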
if (EltTy->isReferenceType()) {
Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
diag::ext_union_member_of_reference_type :
diag::err_union_member_of_reference_type)
<< NewFD->getDeclName() << EltTy;
if (!getLangOpts().MicrosoftExt)
NewFD->setInvalidDecl();
}
}
}
// FIXME: We need to pass in the attributes given an AST
// representation, not a parser representation.
if (D) {
// FIXME: The current scope is almost... but not entirely... correct here.
ProcessDeclAttributes(getCurScope(), NewFD, *D);
if (NewFD->hasAttrs())
CheckAlignasUnderalignment(NewFD);
}
// In auto-retain/release, infer strong retention for fields of
// retainable type.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
NewFD->setInvalidDecl();
if (T.isObjCGCWeak())
Diag(Loc, diag::warn_attribute_weak_on_field);
NewFD->setAccess(AS);
return NewFD;
}
bool Sema::CheckNontrivialField(FieldDecl *FD) {
assert(FD);
assert(getLangOpts().CPlusPlus && "valid check only for C++");
if (FD->isInvalidDecl() || FD->getType()->isDependentType())
return false;
QualType EltTy = Context.getBaseElementType(FD->getType());
if (const RecordType *RT = EltTy->getAs<RecordType>()) {
CXXRecordDecl *RDecl = cast<CXXRecordDecl>(RT->getDecl());
if (RDecl->getDefinition()) {
// We check for copy constructors before constructors
// because otherwise we'll never get complaints about
// copy constructors.
CXXSpecialMember member = CXXInvalid;
// We're required to check for any non-trivial constructors. Since the
// implicit default constructor is suppressed if there are any
// user-declared constructors, we just need to check that there is a
// trivial default constructor and a trivial copy constructor. (We don't
// worry about move constructors here, since this is a C++98 check.)
if (RDecl->hasNonTrivialCopyConstructor())
member = CXXCopyConstructor;
else if (!RDecl->hasTrivialDefaultConstructor())
member = CXXDefaultConstructor;
else if (RDecl->hasNonTrivialCopyAssignment())
member = CXXCopyAssignment;
else if (RDecl->hasNonTrivialDestructor())
member = CXXDestructor;
if (member != CXXInvalid) {
if (!getLangOpts().CPlusPlus11 &&
getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
// Objective-C++ ARC: it is an error to have a non-trivial field of
// a union. However, system headers in Objective-C programs
// occasionally have Objective-C lifetime objects within unions,
// and rather than cause the program to fail, we make those
// members unavailable.
SourceLocation Loc = FD->getLocation();
if (getSourceManager().isInSystemHeader(Loc)) {
if (!FD->hasAttr<UnavailableAttr>())
FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
UnavailableAttr::IR_ARCFieldWithOwnership, Loc));
return false;
}
}
Diag(FD->getLocation(), getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
diag::err_illegal_union_or_anon_struct_member)
<< FD->getParent()->isUnion() << FD->getDeclName() << member;
DiagnoseNontrivial(RDecl, member);
return !getLangOpts().CPlusPlus11;
}
}
}
return false;
}
/// TranslateIvarVisibility - Translate visibility from a token ID to an
/// AST enum value.
static ObjCIvarDecl::AccessControl
TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
switch (ivarVisibility) {
default: llvm_unreachable("Unknown visibility kind");
case tok::objc_private: return ObjCIvarDecl::Private;
case tok::objc_public: return ObjCIvarDecl::Public;
case tok::objc_protected: return ObjCIvarDecl::Protected;
case tok::objc_package: return ObjCIvarDecl::Package;
}
}
/// ActOnIvar - Each ivar field of an Objective-C class is passed into this
/// in order to create an IvarDecl object for it.
Decl *Sema::ActOnIvar(Scope *S,
SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind Visibility) {
IdentifierInfo *II = D.getIdentifier();
Expr *BitWidth = (Expr*)BitfieldWidth;
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
// FIXME: Unnamed fields can be handled in various different ways, for
// example, unnamed unions inject all members into the struct namespace!
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
QualType T = TInfo->getType();
if (BitWidth) {
// 6.7.2.1p3, 6.7.2.1p4
BitWidth = VerifyBitField(Loc, II, T, /*IsMsStruct*/false, BitWidth).get();
if (!BitWidth)
D.setInvalidType();
} else {
// Not a bitfield.
// validate II.
}
if (T->isReferenceType()) {
Diag(Loc, diag::err_ivar_reference_type);
D.setInvalidType();
}
// C99 6.7.2.1p8: A member of a structure or union may have any type other
// than a variably modified type.
else if (T->isVariablyModifiedType()) {
Diag(Loc, diag::err_typecheck_ivar_variable_size);
D.setInvalidType();
}
// Get the visibility (access control) for this ivar.
ObjCIvarDecl::AccessControl ac =
Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
: ObjCIvarDecl::None;
// Must set ivar's DeclContext to its enclosing interface.
ObjCContainerDecl *EnclosingDecl = cast<ObjCContainerDecl>(CurContext);
if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
return nullptr;
ObjCContainerDecl *EnclosingContext;
if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
if (LangOpts.ObjCRuntime.isFragile()) {
// Case of ivar declared in an implementation. Context is that of its class.
EnclosingContext = IMPDecl->getClassInterface();
assert(EnclosingContext && "Implementation has no class interface!");
}
else
EnclosingContext = EnclosingDecl;
} else {
if (ObjCCategoryDecl *CDecl =
dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
if (LangOpts.ObjCRuntime.isFragile() || !CDecl->IsClassExtension()) {
Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
return nullptr;
}
}
EnclosingContext = EnclosingDecl;
}
// Construct the decl.
ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, EnclosingContext,
DeclStart, Loc, II, T,
TInfo, ac, (Expr *)BitfieldWidth);
if (II) {
NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName,
ForVisibleRedeclaration);
if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S)
&& !isa<TagDecl>(PrevDecl)) {
Diag(Loc, diag::err_duplicate_member) << II;
Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
NewID->setInvalidDecl();
}
}
// Process attributes attached to the ivar.
ProcessDeclAttributes(S, NewID, D);
if (D.isInvalidType())
NewID->setInvalidDecl();
// In ARC, infer 'retaining' for ivars of retainable type.
if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
NewID->setInvalidDecl();
if (D.getDeclSpec().isModulePrivateSpecified())
NewID->setModulePrivate();
if (II) {
// FIXME: When interfaces are DeclContexts, we'll need to add
// these to the interface.
S->AddDecl(NewID);
IdResolver.AddDecl(NewID);
}
if (LangOpts.ObjCRuntime.isNonFragile() &&
!NewID->isInvalidDecl() && isa<ObjCInterfaceDecl>(EnclosingDecl))
Diag(Loc, diag::warn_ivars_in_interface);
return NewID;
}
/// ActOnLastBitfield - This routine handles synthesized bitfields rules for
/// class and class extensions. For every class \@interface and class
/// extension \@interface, if the last ivar is a bitfield of any type,
/// then add an implicit `char :0` ivar to the end of that interface.
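/// For example (illustrative):
/// \code
///   @interface I { unsigned x : 4; }
///   @end
/// \endcode
/// Here a trailing 'char :0' ivar is synthesized after 'x'.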
void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
SmallVectorImpl<Decl *> &AllIvarDecls) {
if (LangOpts.ObjCRuntime.isFragile() || AllIvarDecls.empty())
return;
Decl *ivarDecl = AllIvarDecls[AllIvarDecls.size()-1];
ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(ivarDecl);
if (!Ivar->isBitField() || Ivar->isZeroLengthBitField(Context))
return;
ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CurContext);
if (!ID) {
if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CurContext)) {
if (!CD->IsClassExtension())
return;
}
// No need to add this to end of @implementation.
else
return;
}
// All conditions are met. Add a new bitfield to the tail end of ivars.
llvm::APInt Zero(Context.getTypeSize(Context.IntTy), 0);
Expr * BW = IntegerLiteral::Create(Context, Zero, Context.IntTy, DeclLoc);
Ivar = ObjCIvarDecl::Create(Context, cast<ObjCContainerDecl>(CurContext),
DeclLoc, DeclLoc, nullptr,
Context.CharTy,
Context.getTrivialTypeSourceInfo(Context.CharTy,
DeclLoc),
ObjCIvarDecl::Private, BW,
true);
AllIvarDecls.push_back(Ivar);
}
void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &Attrs) {
assert(EnclosingDecl && "missing record or interface decl");
// If this is an Objective-C @implementation or category and we have
// new fields here we should reset the layout of the interface since
// it will now change.
if (!Fields.empty() && isa<ObjCContainerDecl>(EnclosingDecl)) {
ObjCContainerDecl *DC = cast<ObjCContainerDecl>(EnclosingDecl);
switch (DC->getKind()) {
default: break;
case Decl::ObjCCategory:
Context.ResetObjCLayout(cast<ObjCCategoryDecl>(DC)->getClassInterface());
break;
case Decl::ObjCImplementation:
Context.
ResetObjCLayout(cast<ObjCImplementationDecl>(DC)->getClassInterface());
break;
}
}
RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
// Start counting up the number of named members; make sure to include
// members of anonymous structs and unions in the total.
unsigned NumNamedMembers = 0;
if (Record) {
for (const auto *I : Record->decls()) {
if (const auto *IFD = dyn_cast<IndirectFieldDecl>(I))
if (IFD->getDeclName())
++NumNamedMembers;
}
}
// Verify that all the fields are okay.
SmallVector<FieldDecl*, 32> RecFields;
bool ObjCFieldLifetimeErrReported = false;
for (ArrayRef<Decl *>::iterator i = Fields.begin(), end = Fields.end();
i != end; ++i) {
FieldDecl *FD = cast<FieldDecl>(*i);
// Get the type for the field.
const Type *FDTy = FD->getType().getTypePtr();
if (!FD->isAnonymousStructOrUnion()) {
// Remember all fields written by the user.
RecFields.push_back(FD);
}
// If the field is already invalid for some reason, don't emit more
// diagnostics about it.
if (FD->isInvalidDecl()) {
EnclosingDecl->setInvalidDecl();
continue;
}
// C99 6.7.2.1p2:
// A structure or union shall not contain a member with
// incomplete or function type (hence, a structure shall not
// contain an instance of itself, but may contain a pointer to
// an instance of itself), except that the last member of a
// structure with more than one named member may have incomplete
// array type; such a structure (and any union containing,
// possibly recursively, a member that is such a structure)
// shall not be a member of a structure or an element of an
// array.
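// For instance (illustrative):
//   struct S { int len; char data[]; };  // OK: flexible array member is last
//   struct T { char data[]; int len; };  // error: not the last member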
bool IsLastField = (i + 1 == Fields.end());
if (FDTy->isFunctionType()) {
// Field declared as a function.
Diag(FD->getLocation(), diag::err_field_declared_as_function)
<< FD->getDeclName();
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
} else if (FDTy->isIncompleteArrayType() &&
(Record || isa<ObjCContainerDecl>(EnclosingDecl))) {
if (Record) {
// Flexible array member.
// Microsoft and g++ are more permissive regarding flexible arrays;
// they accept a flexible array in a union and also
// as the sole element of a struct/class.
unsigned DiagID = 0;
if (!Record->isUnion() && !IsLastField) {
Diag(FD->getLocation(), diag::err_flexible_array_not_at_end)
<< FD->getDeclName() << FD->getType() << Record->getTagKind();
Diag((*(i + 1))->getLocation(), diag::note_next_field_declaration);
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
} else if (Record->isUnion())
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_union_ms
: getLangOpts().CPlusPlus
? diag::ext_flexible_array_union_gnu
: diag::err_flexible_array_union;
else if (NumNamedMembers < 1)
DiagID = getLangOpts().MicrosoftExt
? diag::ext_flexible_array_empty_aggregate_ms
: getLangOpts().CPlusPlus
? diag::ext_flexible_array_empty_aggregate_gnu
: diag::err_flexible_array_empty_aggregate;
if (DiagID)
Diag(FD->getLocation(), DiagID) << FD->getDeclName()
<< Record->getTagKind();
// While the layout of types that contain virtual bases is not specified
// by the C++ standard, both the Itanium and Microsoft C++ ABIs place
// virtual bases after the derived members. This would make a flexible
// array member declared at the end of an object not adjacent to the end
// of the type.
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Record))
if (RD->getNumVBases() != 0)
Diag(FD->getLocation(), diag::err_flexible_array_virtual_base)
<< FD->getDeclName() << Record->getTagKind();
if (!getLangOpts().C99)
Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
<< FD->getDeclName() << Record->getTagKind();
// If the element type has a non-trivial destructor, we would not
// implicitly destroy the elements, so disallow it for now.
//
// FIXME: GCC allows this. We should probably either implicitly delete
// the destructor of the containing class, or just allow this.
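// For instance (illustrative), a trailing 'std::string tail[];' member is
// rejected here because std::string has a non-trivial destructor.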
QualType BaseElem = Context.getBaseElementType(FD->getType());
if (!BaseElem->isDependentType() && BaseElem.isDestructedType()) {
Diag(FD->getLocation(), diag::err_flexible_array_has_nontrivial_dtor)
<< FD->getDeclName() << FD->getType();
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
}
// Okay, we have a legal flexible array member at the end of the struct.
Record->setHasFlexibleArrayMember(true);
} else {
// In an ObjCContainerDecl, ivars with incomplete array type are accepted,
// unless they are followed by another ivar. That check is done
// elsewhere, after synthesized ivars are known.
}
} else if (!FDTy->isDependentType() &&
RequireCompleteType(FD->getLocation(), FD->getType(),
diag::err_field_incomplete)) {
// Incomplete type
FD->setInvalidDecl();
EnclosingDecl->setInvalidDecl();
continue;
} else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) {
if (Record && FDTTy->getDecl()->hasFlexibleArrayMember()) {
// A type which contains a flexible array member is considered to be a
// flexible array member.
Record->setHasFlexibleArrayMember(true);
if (!Record->isUnion()) {
// If this is a struct/class and this is not the last element, reject
// it. Note that GCC supports variable sized arrays in the middle of
// structures.
if (!IsLastField)
Diag(FD->getLocation(), diag::ext_variable_sized_type_in_struct)
<< FD->getDeclName() << FD->getType();
else {
// We support flexible arrays at the end of structs in
// other structs as an extension.
Diag(FD->getLocation(), diag::ext_flexible_array_in_struct)
<< FD->getDeclName();
}
}
}
if (isa<ObjCContainerDecl>(EnclosingDecl) &&
RequireNonAbstractType(FD->getLocation(), FD->getType(),
diag::err_abstract_type_in_decl,
AbstractIvarType)) {
// Ivars cannot have abstract class types.
FD->setInvalidDecl();
}
if (Record && FDTTy->getDecl()->hasObjectMember())
Record->setHasObjectMember(true);
if (Record && FDTTy->getDecl()->hasVolatileMember())
Record->setHasVolatileMember(true);
} else if (FDTy->isObjCObjectType()) {
/// A field cannot be an Objective-C object.
Diag(FD->getLocation(), diag::err_statically_allocated_object)
<< FixItHint::CreateInsertion(FD->getLocation(), "*");
QualType T = Context.getObjCObjectPointerType(FD->getType());
FD->setType(T);
} else if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers() &&
Record && !ObjCFieldLifetimeErrReported && Record->isUnion()) {
// It's an error in ARC or Weak if a field has lifetime.
// We don't want to report this in a system header, though,
// so we just make the field unavailable.
// FIXME: that's really not sufficient; we need to make the type
// itself invalid to, say, initialize or copy.
QualType T = FD->getType();
if (T.hasNonTrivialObjCLifetime()) {
SourceLocation loc = FD->getLocation();
if (getSourceManager().isInSystemHeader(loc)) {
if (!FD->hasAttr<UnavailableAttr>()) {
FD->addAttr(UnavailableAttr::CreateImplicit(Context, "",
UnavailableAttr::IR_ARCFieldWithOwnership, loc));
}
} else {
Diag(FD->getLocation(), diag::err_arc_objc_object_in_tag)
<< T->isBlockPointerType() << Record->getTagKind();
}
ObjCFieldLifetimeErrReported = true;
}
} else if (getLangOpts().ObjC1 &&
getLangOpts().getGC() != LangOptions::NonGC &&
Record && !Record->hasObjectMember()) {
if (FD->getType()->isObjCObjectPointerType() ||
FD->getType().isObjCGCStrong())
Record->setHasObjectMember(true);
else if (Context.getAsArrayType(FD->getType())) {
QualType BaseType = Context.getBaseElementType(FD->getType());
if (BaseType->isRecordType() &&
BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
Record->setHasObjectMember(true);
else if (BaseType->isObjCObjectPointerType() ||
BaseType.isObjCGCStrong())
Record->setHasObjectMember(true);
}
}
if (Record && !getLangOpts().CPlusPlus && !FD->hasAttr<UnavailableAttr>()) {
QualType FT = FD->getType();
if (FT.isNonTrivialToPrimitiveDefaultInitialize())
Record->setNonTrivialToPrimitiveDefaultInitialize(true);
QualType::PrimitiveCopyKind PCK = FT.isNonTrivialToPrimitiveCopy();
if (PCK != QualType::PCK_Trivial && PCK != QualType::PCK_VolatileTrivial)
Record->setNonTrivialToPrimitiveCopy(true);
if (FT.isDestructedType()) {
Record->setNonTrivialToPrimitiveDestroy(true);
Record->setParamDestroyedInCallee(true);
}
if (const auto *RT = FT->getAs<RecordType>()) {
if (RT->getDecl()->getArgPassingRestrictions() ==
RecordDecl::APK_CanNeverPassInRegs)
Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
} else if (FT.getQualifiers().getObjCLifetime() == Qualifiers::OCL_Weak)
Record->setArgPassingRestrictions(RecordDecl::APK_CanNeverPassInRegs);
}
if (Record && FD->getType().isVolatileQualified())
Record->setHasVolatileMember(true);
// Keep track of the number of named members.
if (FD->getIdentifier())
++NumNamedMembers;
}
// Okay, we successfully defined 'Record'.
if (Record) {
bool Completed = false;
if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
if (!CXXRecord->isInvalidDecl()) {
// Set access bits correctly on the directly-declared conversions.
for (CXXRecordDecl::conversion_iterator
I = CXXRecord->conversion_begin(),
E = CXXRecord->conversion_end(); I != E; ++I)
I.setAccess((*I)->getAccess());
}
if (!CXXRecord->isDependentType()) {
if (CXXRecord->hasUserDeclaredDestructor()) {
// Adjust user-defined destructor exception spec.
if (getLangOpts().CPlusPlus11)
AdjustDestructorExceptionSpec(CXXRecord,
CXXRecord->getDestructor());
}
// Add any implicitly-declared members to this class.
AddImplicitlyDeclaredMembersToClass(CXXRecord);
if (!CXXRecord->isInvalidDecl()) {
// If we have virtual base classes, we may end up finding multiple
// final overriders for a given virtual function. Check for this
// problem now.
if (CXXRecord->getNumVBases()) {
CXXFinalOverriderMap FinalOverriders;
CXXRecord->getFinalOverriders(FinalOverriders);
for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
MEnd = FinalOverriders.end();
M != MEnd; ++M) {
for (OverridingMethods::iterator SO = M->second.begin(),
SOEnd = M->second.end();
SO != SOEnd; ++SO) {
assert(SO->second.size() > 0 &&
"Virtual function without overriding functions?");
if (SO->second.size() == 1)
continue;
// C++ [class.virtual]p2:
// In a derived class, if a virtual member function of a base
// class subobject has more than one final overrider the
// program is ill-formed.
Diag(Record->getLocation(), diag::err_multiple_final_overriders)
<< (const NamedDecl *)M->first << Record;
Diag(M->first->getLocation(),
diag::note_overridden_virtual_function);
for (OverridingMethods::overriding_iterator
OM = SO->second.begin(),
OMEnd = SO->second.end();
OM != OMEnd; ++OM)
Diag(OM->Method->getLocation(), diag::note_final_overrider)
<< (const NamedDecl *)M->first << OM->Method->getParent();
Record->setInvalidDecl();
}
}
CXXRecord->completeDefinition(&FinalOverriders);
Completed = true;
}
}
}
}
if (!Completed)
Record->completeDefinition();
// Handle attributes before checking the layout.
ProcessDeclAttributeList(S, Record, Attrs);
// We may have deferred checking for a deleted destructor. Check now.
if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
auto *Dtor = CXXRecord->getDestructor();
if (Dtor && Dtor->isImplicit() &&
ShouldDeleteSpecialMember(Dtor, CXXDestructor)) {
CXXRecord->setImplicitDestructorIsDeleted();
SetDeclDeleted(Dtor, CXXRecord->getLocation());
}
}
if (Record->hasAttrs()) {
CheckAlignasUnderalignment(Record);
if (const MSInheritanceAttr *IA = Record->getAttr<MSInheritanceAttr>())
checkMSInheritanceAttrOnDefinition(cast<CXXRecordDecl>(Record),
IA->getRange(), IA->getBestCase(),
IA->getSemanticSpelling());
}
// Check if the structure/union declaration is a type that can have zero
// size in C. For C this is a language extension, for C++ it may cause
// compatibility problems.
bool CheckForZeroSize;
if (!getLangOpts().CPlusPlus) {
CheckForZeroSize = true;
} else {
// For C++ filter out types that cannot be referenced in C code.
CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record);
CheckForZeroSize =
CXXRecord->getLexicalDeclContext()->isExternCContext() &&
!CXXRecord->isDependentType() &&
CXXRecord->isCLike();
}
if (CheckForZeroSize) {
bool ZeroSize = true;
bool IsEmpty = true;
unsigned NonBitFields = 0;
for (RecordDecl::field_iterator I = Record->field_begin(),
E = Record->field_end();
(NonBitFields == 0 || ZeroSize) && I != E; ++I) {
IsEmpty = false;
if (I->isUnnamedBitfield()) {
if (!I->isZeroLengthBitField(Context))
ZeroSize = false;
} else {
++NonBitFields;
QualType FieldType = I->getType();
if (FieldType->isIncompleteType() ||
!Context.getTypeSizeInChars(FieldType).isZero())
ZeroSize = false;
}
}
// Empty structs are an extension in C (C99 6.7.2.1p7). They are
// allowed in C++, but we warn if the declaration is inside an
// extern "C" block.
if (ZeroSize) {
Diag(RecLoc, getLangOpts().CPlusPlus ?
diag::warn_zero_size_struct_union_in_extern_c :
diag::warn_zero_size_struct_union_compat)
<< IsEmpty << Record->isUnion() << (NonBitFields > 1);
}
// Structs without named members are an extension in C (C99 6.7.2.1p7),
// but are accepted by GCC.
if (NonBitFields == 0 && !getLangOpts().CPlusPlus) {
Diag(RecLoc, IsEmpty ? diag::ext_empty_struct_union :
diag::ext_no_named_members_in_struct_union)
<< Record->isUnion();
}
}
} else {
ObjCIvarDecl **ClsFields =
reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
ID->setEndOfDefinitionLoc(RBrac);
// Add ivars to the class's DeclContext.
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
ClsFields[i]->setLexicalDeclContext(ID);
ID->addDecl(ClsFields[i]);
}
// Must enforce the rule that ivars in the base classes may not be
// duplicates.
if (ID->getSuperClass())
DiagnoseDuplicateIvars(ID, ID->getSuperClass());
} else if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
for (unsigned I = 0, N = RecFields.size(); I != N; ++I)
// An ivar declared in an @implementation never belongs to the
// implementation; it is only in the implementation's lexical context.
ClsFields[I]->setLexicalDeclContext(IMPDecl);
CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
IMPDecl->setIvarLBraceLoc(LBrac);
IMPDecl->setIvarRBraceLoc(RBrac);
} else if (ObjCCategoryDecl *CDecl =
dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
// Case of ivars in a class extension; all other cases have been
// reported as errors elsewhere.
// FIXME: Class extension does not have a LocEnd field.
// CDecl->setLocEnd(RBrac);
// Add ivars to the class extension's DeclContext.
// Diagnose redeclaration of private ivars.
ObjCInterfaceDecl *IDecl = CDecl->getClassInterface();
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
if (IDecl) {
if (const ObjCIvarDecl *ClsIvar =
IDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
Diag(ClsFields[i]->getLocation(),
diag::err_duplicate_ivar_declaration);
Diag(ClsIvar->getLocation(), diag::note_previous_definition);
continue;
}
for (const auto *Ext : IDecl->known_extensions()) {
if (const ObjCIvarDecl *ClsExtIvar
= Ext->getIvarDecl(ClsFields[i]->getIdentifier())) {
Diag(ClsFields[i]->getLocation(),
diag::err_duplicate_ivar_declaration);
Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
continue;
}
}
}
ClsFields[i]->setLexicalDeclContext(CDecl);
CDecl->addDecl(ClsFields[i]);
}
CDecl->setIvarLBraceLoc(LBrac);
CDecl->setIvarRBraceLoc(RBrac);
}
}
}
/// Determine whether the given integral value is representable within
/// the given type T.
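/// For instance (illustrative), 255 is representable in 'unsigned char'
/// (8 value bits) but not in 'signed char' (7 value bits).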
static bool isRepresentableIntegerValue(ASTContext &Context,
llvm::APSInt &Value,
QualType T) {
assert((T->isIntegralType(Context) || T->isEnumeralType()) &&
"Integral type required!");
unsigned BitWidth = Context.getIntWidth(T);
if (Value.isUnsigned() || Value.isNonNegative()) {
if (T->isSignedIntegerOrEnumerationType())
--BitWidth;
return Value.getActiveBits() <= BitWidth;
}
return Value.getMinSignedBits() <= BitWidth;
}
// Given an integral type, return the next larger integral type
// (or a NULL type if no such type exists).
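// For instance (illustrative), on a typical 64-bit target this maps 'int'
// to 'long' and yields a NULL type for 'unsigned long long'.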
static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) {
// FIXME: Int128/UInt128 support, which also needs to be introduced into
// enum checking below.
assert((T->isIntegralType(Context) ||
T->isEnumeralType()) && "Integral type required!");
const unsigned NumTypes = 4;
QualType SignedIntegralTypes[NumTypes] = {
Context.ShortTy, Context.IntTy, Context.LongTy, Context.LongLongTy
};
QualType UnsignedIntegralTypes[NumTypes] = {
Context.UnsignedShortTy, Context.UnsignedIntTy, Context.UnsignedLongTy,
Context.UnsignedLongLongTy
};
unsigned BitWidth = Context.getTypeSize(T);
QualType *Types = T->isSignedIntegerOrEnumerationType()? SignedIntegralTypes
: UnsignedIntegralTypes;
for (unsigned I = 0; I != NumTypes; ++I)
if (Context.getTypeSize(Types[I]) > BitWidth)
return Types[I];
return QualType();
}
EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *Val) {
unsigned IntWidth = Context.getTargetInfo().getIntWidth();
llvm::APSInt EnumVal(IntWidth);
QualType EltTy;
if (Val && DiagnoseUnexpandedParameterPack(Val, UPPC_EnumeratorValue))
Val = nullptr;
if (Val)
Val = DefaultLvalueConversion(Val).get();
if (Val) {
if (Enum->isDependentType() || Val->isTypeDependent())
EltTy = Context.DependentTy;
else {
if (getLangOpts().CPlusPlus11 && Enum->isFixed() &&
!getLangOpts().MSVCCompat) {
// C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the
// constant-expression in the enumerator-definition shall be a converted
// constant expression of the underlying type.
EltTy = Enum->getIntegerType();
ExprResult Converted =
CheckConvertedConstantExpression(Val, EltTy, EnumVal,
CCEK_Enumerator);
if (Converted.isInvalid())
Val = nullptr;
else
Val = Converted.get();
} else if (!Val->isValueDependent() &&
!(Val = VerifyIntegerConstantExpression(Val,
&EnumVal).get())) {
// C99 6.7.2.2p2: Make sure we have an integer constant expression.
} else {
if (Enum->isComplete()) {
EltTy = Enum->getIntegerType();
// In Obj-C and Microsoft mode, require the enumeration value to be
// representable in the underlying type of the enumeration. In C++11,
// we perform a non-narrowing conversion as part of converted constant
// expression checking.
if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
if (getLangOpts().MSVCCompat) {
Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy;
Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).get();
} else
Diag(IdLoc, diag::err_enumerator_too_large) << EltTy;
} else
Val = ImpCastExprToType(Val, EltTy,
EltTy->isBooleanType() ?
CK_IntegralToBoolean : CK_IntegralCast)
.get();
} else if (getLangOpts().CPlusPlus) {
// C++11 [dcl.enum]p5:
// If the underlying type is not fixed, the type of each enumerator
// is the type of its initializing value:
// - If an initializer is specified for an enumerator, the
// initializing value has the same type as the expression.
EltTy = Val->getType();
} else {
// C99 6.7.2.2p2:
// The expression that defines the value of an enumeration constant
// shall be an integer constant expression that has a value
// representable as an int.
// Complain if the value is not representable in an int.
if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
Diag(IdLoc, diag::ext_enum_value_not_int)
<< EnumVal.toString(10) << Val->getSourceRange()
<< (EnumVal.isUnsigned() || EnumVal.isNonNegative());
else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
// Force the type of the expression to 'int'.
Val = ImpCastExprToType(Val, Context.IntTy, CK_IntegralCast).get();
}
EltTy = Val->getType();
}
}
}
}
if (!Val) {
if (Enum->isDependentType())
EltTy = Context.DependentTy;
else if (!LastEnumConst) {
// C++0x [dcl.enum]p5:
// If the underlying type is not fixed, the type of each enumerator
// is the type of its initializing value:
// - If no initializer is specified for the first enumerator, the
// initializing value has an unspecified integral type.
//
// GCC uses 'int' for its unspecified integral type, as does
// C99 6.7.2.2p3.
if (Enum->isFixed()) {
EltTy = Enum->getIntegerType();
}
else {
EltTy = Context.IntTy;
}
} else {
// Assign the last value + 1.
EnumVal = LastEnumConst->getInitVal();
++EnumVal;
EltTy = LastEnumConst->getType();
// Check for overflow on increment.
if (EnumVal < LastEnumConst->getInitVal()) {
// C++0x [dcl.enum]p5:
// If the underlying type is not fixed, the type of each enumerator
// is the type of its initializing value:
//
// - Otherwise the type of the initializing value is the same as
// the type of the initializing value of the preceding enumerator
// unless the incremented value is not representable in that type,
// in which case the type is an unspecified integral type
// sufficient to contain the incremented value. If no such type
// exists, the program is ill-formed.
QualType T = getNextLargerIntegralType(Context, EltTy);
if (T.isNull() || Enum->isFixed()) {
// There is no integral type large enough to represent this
// value. Complain, then allow the value to wrap around.
EnumVal = LastEnumConst->getInitVal();
EnumVal = EnumVal.zext(EnumVal.getBitWidth() * 2);
++EnumVal;
if (Enum->isFixed())
// When the underlying type is fixed, this is ill-formed.
Diag(IdLoc, diag::err_enumerator_wrapped)
<< EnumVal.toString(10)
<< EltTy;
else
Diag(IdLoc, diag::ext_enumerator_increment_too_large)
<< EnumVal.toString(10);
} else {
EltTy = T;
}
// Retrieve the last enumerator's value, extend that value to the
// type that is supposed to be large enough to represent the incremented
// value, then increment.
EnumVal = LastEnumConst->getInitVal();
EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
++EnumVal;
// If we're not in C++, diagnose the overflow of enumerator values,
// which in C99 means that the enumerator value is not representable in
// an int (C99 6.7.2.2p2). However, we support GCC's extension that
// permits enumerator values that are representable in some larger
// integral type.
if (!getLangOpts().CPlusPlus && !T.isNull())
Diag(IdLoc, diag::warn_enum_value_overflow);
} else if (!getLangOpts().CPlusPlus &&
!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
// Enforce C99 6.7.2.2p2 even when we compute the next value.
Diag(IdLoc, diag::ext_enum_value_not_int)
<< EnumVal.toString(10) << 1;
}
}
}
if (!EltTy->isDependentType()) {
// Make the enumerator value match the signedness and size of the
// enumerator's type.
EnumVal = EnumVal.extOrTrunc(Context.getIntWidth(EltTy));
EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
}
return EnumConstantDecl::Create(Context, Enum, IdLoc, Id, EltTy,
Val, EnumVal);
}
Sema::SkipBodyInfo Sema::shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc) {
if (!(getLangOpts().Modules || getLangOpts().ModulesLocalVisibility) ||
!getLangOpts().CPlusPlus)
return SkipBodyInfo();
// We have an anonymous enum definition. Look up the first enumerator to
// determine if we should merge the definition with an existing one and
// skip the body.
NamedDecl *PrevDecl = LookupSingleName(S, II, IILoc, LookupOrdinaryName,
forRedeclarationInCurContext());
auto *PrevECD = dyn_cast_or_null<EnumConstantDecl>(PrevDecl);
if (!PrevECD)
return SkipBodyInfo();
EnumDecl *PrevED = cast<EnumDecl>(PrevECD->getDeclContext());
NamedDecl *Hidden;
if (!PrevED->getDeclName() && !hasVisibleDefinition(PrevED, &Hidden)) {
SkipBodyInfo Skip;
Skip.Previous = Hidden;
return Skip;
}
return SkipBodyInfo();
}
Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val) {
EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl);
EnumConstantDecl *LastEnumConst =
cast_or_null<EnumConstantDecl>(lastEnumConst);
// The scope passed in may not be a decl scope. Zip up the scope tree until
// we find one that is.
S = getNonFieldDeclScope(S);
// Verify that there isn't already something declared with this name in this
// scope.
NamedDecl *PrevDecl = LookupSingleName(S, Id, IdLoc, LookupOrdinaryName,
ForVisibleRedeclaration);
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(IdLoc, PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
}
// C++ [class.mem]p15:
// If T is the name of a class, then each of the following shall have a name
// different from T:
// - every enumerator of every member of class T that is an unscoped
// enumerated type
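// For example (illustrative): 'struct T { enum { T }; };' is diagnosed
// because the enumerator shares the name of its enclosing class.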
if (getLangOpts().CPlusPlus && !TheEnumDecl->isScoped())
DiagnoseClassNameShadow(TheEnumDecl->getDeclContext(),
DeclarationNameInfo(Id, IdLoc));
EnumConstantDecl *New =
CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
if (!New)
return nullptr;
if (PrevDecl) {
// When in C++, we may get a TagDecl with the same name; in this case the
// enum constant will 'hide' the tag.
assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
"Received TagDecl when not in C++!");
if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S)) {
if (isa<EnumConstantDecl>(PrevDecl))
Diag(IdLoc, diag::err_redefinition_of_enumerator) << Id;
else
Diag(IdLoc, diag::err_redefinition) << Id;
notePreviousDefinition(PrevDecl, IdLoc);
return nullptr;
}
}
// Process attributes.
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// Register this decl in the current scope stack.
New->setAccess(TheEnumDecl->getAccess());
PushOnScopeChains(New, S);
ActOnDocumentableDecl(New);
return New;
}
// Returns true when the enumerator's initializing expression does not
// trigger the duplicate-enum warning. A few common cases are exempted
// as follows:
// Element2 = Element1
// Element2 = Element1 + 1
// Element2 = Element1 - 1
// Where Element2 and Element1 are from the same enum.
static bool ValidDuplicateEnum(EnumConstantDecl *ECD, EnumDecl *Enum) {
Expr *InitExpr = ECD->getInitExpr();
if (!InitExpr)
return true;
InitExpr = InitExpr->IgnoreImpCasts();
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(InitExpr)) {
if (!BO->isAdditiveOp())
return true;
IntegerLiteral *IL = dyn_cast<IntegerLiteral>(BO->getRHS());
if (!IL)
return true;
if (IL->getValue() != 1)
return true;
InitExpr = BO->getLHS();
}
// This checks if the elements are from the same enum.
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(InitExpr);
if (!DRE)
return true;
EnumConstantDecl *EnumConstant = dyn_cast<EnumConstantDecl>(DRE->getDecl());
if (!EnumConstant)
return true;
if (cast<EnumDecl>(TagDecl::castFromDeclContext(ECD->getDeclContext())) !=
Enum)
return true;
return false;
}
// Emits a warning when an element is implicitly given a value that
// a previous element has already been set to.
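// For example (illustrative): in 'enum E { A, B = 0 };' both A and B end up
// with the value 0, and A was only assigned it implicitly, so the duplicate
// is diagnosed.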
static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
EnumDecl *Enum, QualType EnumType) {
// Avoid anonymous enums
if (!Enum->getIdentifier())
return;
// Only check for small enums.
if (Enum->getNumPositiveBits() > 63 || Enum->getNumNegativeBits() > 64)
return;
if (S.Diags.isIgnored(diag::warn_duplicate_enum_values, Enum->getLocation()))
return;
typedef SmallVector<EnumConstantDecl *, 3> ECDVector;
typedef SmallVector<std::unique_ptr<ECDVector>, 3> DuplicatesVector;
typedef llvm::PointerUnion<EnumConstantDecl*, ECDVector*> DeclOrVector;
typedef llvm::DenseMap<int64_t, DeclOrVector> ValueToVectorMap;
// Use int64_t as a key to avoid needing special handling for DenseMap keys.
auto EnumConstantToKey = [](const EnumConstantDecl *D) {
llvm::APSInt Val = D->getInitVal();
return Val.isSigned() ? Val.getSExtValue() : Val.getZExtValue();
};
DuplicatesVector DupVector;
ValueToVectorMap EnumMap;
// Populate the EnumMap with all values represented by enum constants without
// an initializer.
for (auto *Element : Elements) {
EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Element);
// Null EnumConstantDecl means a previous diagnostic has been emitted for
// this constant. Skip this enum since it may be ill-formed.
if (!ECD) {
return;
}
// Constants with initializers are handled in the next loop.
if (ECD->getInitExpr())
continue;
// Duplicate values are handled in the next loop.
EnumMap.insert({EnumConstantToKey(ECD), ECD});
}
if (EnumMap.empty())
return;
// Create vectors for any values that have duplicates.
for (auto *Element : Elements) {
// The last loop returned if any constant was null.
EnumConstantDecl *ECD = cast<EnumConstantDecl>(Element);
if (!ValidDuplicateEnum(ECD, Enum))
continue;
auto Iter = EnumMap.find(EnumConstantToKey(ECD));
if (Iter == EnumMap.end())
continue;
DeclOrVector& Entry = Iter->second;
if (EnumConstantDecl *D = Entry.dyn_cast<EnumConstantDecl*>()) {
// Ensure constants are different.
if (D == ECD)
continue;
// Create new vector and push values onto it.
auto Vec = llvm::make_unique<ECDVector>();
Vec->push_back(D);
Vec->push_back(ECD);
// Update entry to point to the duplicates vector.
Entry = Vec.get();
// Store the vector somewhere we can consult later for quick emission of
// diagnostics.
DupVector.emplace_back(std::move(Vec));
continue;
}
ECDVector *Vec = Entry.get<ECDVector*>();
// Make sure constants are not added more than once.
if (*Vec->begin() == ECD)
continue;
Vec->push_back(ECD);
}
// Emit diagnostics.
for (const auto &Vec : DupVector) {
assert(Vec->size() > 1 && "ECDVector should have at least 2 elements.");
// Emit warning for one enum constant.
auto *FirstECD = Vec->front();
S.Diag(FirstECD->getLocation(), diag::warn_duplicate_enum_values)
<< FirstECD << FirstECD->getInitVal().toString(10)
<< FirstECD->getSourceRange();
// Emit one note for each of the remaining enum constants with
// the same value.
for (auto *ECD : llvm::make_range(Vec->begin() + 1, Vec->end()))
S.Diag(ECD->getLocation(), diag::note_duplicate_element)
<< ECD << ECD->getInitVal().toString(10)
<< ECD->getSourceRange();
}
}
bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const {
assert(ED->isClosedFlag() && "looking for value in non-flag or open enum");
assert(ED->isCompleteDefinition() && "expected enum definition");
auto R = FlagBitsCache.insert(std::make_pair(ED, llvm::APInt()));
llvm::APInt &FlagBits = R.first->second;
if (R.second) {
for (auto *E : ED->enumerators()) {
const auto &EVal = E->getInitVal();
// Only single-bit enumerators introduce new flag values.
if (EVal.isPowerOf2())
FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
}
}
// A value is in a flag enum if either its bits are a subset of the enum's
// flag bits (the first condition) or we are allowing masks and the same is
// true of its complement (the second condition). When masks are allowed, we
// allow the common idiom of ~(enum1 | enum2) to be a valid enum value.
//
// While it's true that any value could be used as a mask, the assumption is
// that a mask will have all of the insignificant bits set. Anything else is
// likely a logic error.
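// For example (illustrative): with enumerators 0x1, 0x2 and 0x4, the value
// 0x3 passes the subset test, and with AllowMask the mask idiom
// ~(0x1 | 0x2) is accepted as well.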
llvm::APInt FlagMask = ~FlagBits.zextOrTrunc(Val.getBitWidth());
return !(FlagMask & Val) || (AllowMask && !(FlagMask & ~Val));
}
void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDeclX, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attrs) {
EnumDecl *Enum = cast<EnumDecl>(EnumDeclX);
QualType EnumType = Context.getTypeDeclType(Enum);
ProcessDeclAttributeList(S, Enum, Attrs);
if (Enum->isDependentType()) {
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
EnumConstantDecl *ECD =
cast_or_null<EnumConstantDecl>(Elements[i]);
if (!ECD) continue;
ECD->setType(EnumType);
}
Enum->completeDefinition(Context.DependentTy, Context.DependentTy, 0, 0);
return;
}
// TODO: If the result value doesn't fit in an int, it must be a long or long
// long value. ISO C does not support this, but GCC does as an extension;
// emit a warning.
unsigned IntWidth = Context.getTargetInfo().getIntWidth();
unsigned CharWidth = Context.getTargetInfo().getCharWidth();
unsigned ShortWidth = Context.getTargetInfo().getShortWidth();
// Verify that all the values are okay, compute the size of the values, and
// reverse the list.
unsigned NumNegativeBits = 0;
unsigned NumPositiveBits = 0;
// Keep track of whether all elements have type int.
bool AllElementsInt = true;
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
EnumConstantDecl *ECD =
cast_or_null<EnumConstantDecl>(Elements[i]);
if (!ECD) continue; // Already issued a diagnostic.
const llvm::APSInt &InitVal = ECD->getInitVal();
// Keep track of the size of positive and negative values.
if (InitVal.isUnsigned() || InitVal.isNonNegative())
NumPositiveBits = std::max(NumPositiveBits,
(unsigned)InitVal.getActiveBits());
else
NumNegativeBits = std::max(NumNegativeBits,
(unsigned)InitVal.getMinSignedBits());
// Keep track of whether every enum element has type int (very common).
if (AllElementsInt)
AllElementsInt = ECD->getType() == Context.IntTy;
}
// Figure out the type that should be used for this enum.
QualType BestType;
unsigned BestWidth;
// C++0x N3000 [conv.prom]p3:
// An rvalue of an unscoped enumeration type whose underlying
// type is not fixed can be converted to an rvalue of the first
// of the following types that can represent all the values of
// the enumeration: int, unsigned int, long int, unsigned long
// int, long long int, or unsigned long long int.
// C99 6.4.4.3p2:
// An identifier declared as an enumeration constant has type int.
// The C99 rule is modified by a GCC extension.
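// For instance (illustrative): 'enum E { X = 5000000000 };' receives an
// underlying type wider than 'int' under that extension.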
QualType BestPromotionType;
bool Packed = Enum->hasAttr<PackedAttr>();
// -fshort-enums is the equivalent to specifying the packed attribute on all
// enum definitions.
if (LangOpts.ShortEnums)
Packed = true;
// If the enum already has a type because it is fixed or dictated by the
// target, promote that type instead of analyzing the enumerators.
if (Enum->isComplete()) {
BestType = Enum->getIntegerType();
if (BestType->isPromotableIntegerType())
BestPromotionType = Context.getPromotedIntegerType(BestType);
else
BestPromotionType = BestType;
BestWidth = Context.getIntWidth(BestType);
}
else if (NumNegativeBits) {
// If there is a negative value, figure out the smallest integer type (of
// int/long/longlong) that fits.
// If it's packed, check also if it fits a char or a short.
if (Packed && NumNegativeBits <= CharWidth && NumPositiveBits < CharWidth) {
BestType = Context.SignedCharTy;
BestWidth = CharWidth;
} else if (Packed && NumNegativeBits <= ShortWidth &&
NumPositiveBits < ShortWidth) {
BestType = Context.ShortTy;
BestWidth = ShortWidth;
} else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
BestType = Context.IntTy;
BestWidth = IntWidth;
} else {
BestWidth = Context.getTargetInfo().getLongWidth();
if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
BestType = Context.LongTy;
} else {
BestWidth = Context.getTargetInfo().getLongLongWidth();
if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
Diag(Enum->getLocation(), diag::ext_enum_too_large);
BestType = Context.LongLongTy;
}
}
BestPromotionType = (BestWidth <= IntWidth ? Context.IntTy : BestType);
} else {
// If there is no negative value, figure out the smallest type that fits
// all of the enumerator values.
// If it's packed, check also if it fits a char or a short.
if (Packed && NumPositiveBits <= CharWidth) {
BestType = Context.UnsignedCharTy;
BestPromotionType = Context.IntTy;
BestWidth = CharWidth;
} else if (Packed && NumPositiveBits <= ShortWidth) {
BestType = Context.UnsignedShortTy;
BestPromotionType = Context.IntTy;
BestWidth = ShortWidth;
} else if (NumPositiveBits <= IntWidth) {
BestType = Context.UnsignedIntTy;
BestWidth = IntWidth;
BestPromotionType
= (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedIntTy : Context.IntTy;
} else if (NumPositiveBits <=
(BestWidth = Context.getTargetInfo().getLongWidth())) {
BestType = Context.UnsignedLongTy;
BestPromotionType
= (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedLongTy : Context.LongTy;
} else {
BestWidth = Context.getTargetInfo().getLongLongWidth();
assert(NumPositiveBits <= BestWidth &&
"How could an initializer get larger than ULL?");
BestType = Context.UnsignedLongLongTy;
BestPromotionType
= (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
? Context.UnsignedLongLongTy : Context.LongLongTy;
}
}
// Loop over all of the enumerator constants, changing their types to match
// the type of the enum if needed.
for (auto *D : Elements) {
auto *ECD = cast_or_null<EnumConstantDecl>(D);
if (!ECD) continue; // Already issued a diagnostic.
// Standard C says the enumerators have int type, but we allow, as an
// extension, the enumerators to be larger than int size. If each
// enumerator value fits in an int, type it as an int, otherwise type it the
// same as the enumerator decl itself. This means that in "enum { X = 1U }"
// that X has type 'int', not 'unsigned'.
// Determine whether the value fits into an int.
llvm::APSInt InitVal = ECD->getInitVal();
// If it fits into an integer type, force it. Otherwise force it to match
// the enum decl type.
QualType NewTy;
unsigned NewWidth;
bool NewSign;
if (!getLangOpts().CPlusPlus &&
!Enum->isFixed() &&
isRepresentableIntegerValue(Context, InitVal, Context.IntTy)) {
NewTy = Context.IntTy;
NewWidth = IntWidth;
NewSign = true;
} else if (ECD->getType() == BestType) {
// Already the right type!
if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
// enumeration.
ECD->setType(EnumType);
continue;
} else {
NewTy = BestType;
NewWidth = BestWidth;
NewSign = BestType->isSignedIntegerOrEnumerationType();
}
// Adjust the APSInt value.
InitVal = InitVal.extOrTrunc(NewWidth);
InitVal.setIsSigned(NewSign);
ECD->setInitVal(InitVal);
// Adjust the Expr initializer and type.
if (ECD->getInitExpr() &&
!Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy,
CK_IntegralCast,
ECD->getInitExpr(),
/*base paths*/ nullptr,
VK_RValue));
if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
// enumeration.
ECD->setType(EnumType);
else
ECD->setType(NewTy);
}
Enum->completeDefinition(BestType, BestPromotionType,
NumPositiveBits, NumNegativeBits);
CheckForDuplicateEnumValues(*this, Elements, Enum, EnumType);
if (Enum->isClosedFlag()) {
for (Decl *D : Elements) {
EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(D);
if (!ECD) continue; // Already issued a diagnostic.
llvm::APSInt InitVal = ECD->getInitVal();
if (InitVal != 0 && !InitVal.isPowerOf2() &&
!IsValueInFlagEnum(Enum, InitVal, true))
Diag(ECD->getLocation(), diag::warn_flag_enum_constant_out_of_range)
<< ECD << Enum;
}
}
// Now that the enum type is defined, ensure it's not been underaligned.
if (Enum->hasAttrs())
CheckAlignasUnderalignment(Enum);
}
Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation StartLoc,
SourceLocation EndLoc) {
StringLiteral *AsmString = cast<StringLiteral>(expr);
FileScopeAsmDecl *New = FileScopeAsmDecl::Create(Context, CurContext,
AsmString, StartLoc,
EndLoc);
CurContext->addDecl(New);
return New;
}
static void checkModuleImportContext(Sema &S, Module *M,
SourceLocation ImportLoc, DeclContext *DC,
bool FromInclude = false) {
SourceLocation ExternCLoc;
if (auto *LSD = dyn_cast<LinkageSpecDecl>(DC)) {
switch (LSD->getLanguage()) {
case LinkageSpecDecl::lang_c:
if (ExternCLoc.isInvalid())
ExternCLoc = LSD->getLocStart();
break;
case LinkageSpecDecl::lang_cxx:
break;
}
DC = LSD->getParent();
}
while (isa<LinkageSpecDecl>(DC) || isa<ExportDecl>(DC))
DC = DC->getParent();
if (!isa<TranslationUnitDecl>(DC)) {
S.Diag(ImportLoc, (FromInclude && S.isModuleVisible(M))
? diag::ext_module_import_not_at_top_level_noop
: diag::err_module_import_not_at_top_level_fatal)
<< M->getFullModuleName() << DC;
S.Diag(cast<Decl>(DC)->getLocStart(),
diag::note_module_import_not_at_top_level) << DC;
} else if (!M->IsExternC && ExternCLoc.isValid()) {
S.Diag(ImportLoc, diag::ext_module_import_in_extern_c)
<< M->getFullModuleName();
S.Diag(ExternCLoc, diag::note_extern_c_begins_here);
}
}
Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc,
ModuleDeclKind MDK,
ModuleIdPath Path) {
assert(getLangOpts().ModulesTS &&
"should only have module decl in modules TS");
// A module implementation unit requires that we are not compiling a module
// of any kind. A module interface unit requires that we are not compiling a
// module map.
switch (getLangOpts().getCompilingModule()) {
case LangOptions::CMK_None:
// It's OK to compile a module interface as a normal translation unit.
break;
case LangOptions::CMK_ModuleInterface:
if (MDK != ModuleDeclKind::Implementation)
break;
// We were asked to compile a module interface unit but this is a module
// implementation unit. That indicates the 'export' is missing.
Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch)
<< FixItHint::CreateInsertion(ModuleLoc, "export ");
MDK = ModuleDeclKind::Interface;
break;
case LangOptions::CMK_ModuleMap:
Diag(ModuleLoc, diag::err_module_decl_in_module_map_module);
return nullptr;
}
assert(ModuleScopes.size() == 1 && "expected to be at global module scope");
// FIXME: Most of this work should be done by the preprocessor rather than
// here, in order to support macro import.
// Only one module-declaration is permitted per source file.
if (ModuleScopes.back().Module->Kind == Module::ModuleInterfaceUnit) {
Diag(ModuleLoc, diag::err_module_redeclaration);
Diag(VisibleModules.getImportLoc(ModuleScopes.back().Module),
diag::note_prev_module_declaration);
return nullptr;
}
// Flatten the dots in a module name. Unlike Clang's hierarchical module map
// modules, the dots here are just another character that can appear in a
// module name.
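// For example (illustrative), 'module foo.bar;' names a single module
// called "foo.bar" rather than a submodule 'bar' of module 'foo'.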
std::string ModuleName;
for (auto &Piece : Path) {
if (!ModuleName.empty())
ModuleName += ".";
ModuleName += Piece.first->getName();
}
// If a module name was explicitly specified on the command line, it must be
// correct.
if (!getLangOpts().CurrentModule.empty() &&
getLangOpts().CurrentModule != ModuleName) {
Diag(Path.front().second, diag::err_current_module_name_mismatch)
<< SourceRange(Path.front().second, Path.back().second)
<< getLangOpts().CurrentModule;
return nullptr;
}
const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
auto &Map = PP.getHeaderSearchInfo().getModuleMap();
Module *Mod;
switch (MDK) {
case ModuleDeclKind::Interface: {
// We can't have parsed or imported a definition of this module or parsed a
// module map defining it already.
if (auto *M = Map.findModule(ModuleName)) {
Diag(Path[0].second, diag::err_module_redefinition) << ModuleName;
if (M->DefinitionLoc.isValid())
Diag(M->DefinitionLoc, diag::note_prev_module_definition);
else if (const auto *FE = M->getASTFile())
Diag(M->DefinitionLoc, diag::note_prev_module_definition_from_ast_file)
<< FE->getName();
Mod = M;
break;
}
// Create a Module for the module that we're defining.
Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
ModuleScopes.front().Module);
assert(Mod && "module creation should not fail");
break;
}
case ModuleDeclKind::Partition:
// FIXME: Check we are in a submodule of the named module.
return nullptr;
case ModuleDeclKind::Implementation:
std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
PP.getIdentifierInfo(ModuleName), Path[0].second);
Mod = getModuleLoader().loadModule(ModuleLoc, Path, Module::AllVisible,
/*IsIncludeDirective=*/false);
if (!Mod) {
Diag(ModuleLoc, diag::err_module_not_defined) << ModuleName;
// Create an empty module interface unit for error recovery.
Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
ModuleScopes.front().Module);
}
break;
}
// Switch from the global module to the named module.
ModuleScopes.back().Module = Mod;
ModuleScopes.back().ModuleInterface = MDK != ModuleDeclKind::Implementation;
VisibleModules.setVisible(Mod, ModuleLoc);
// From now on, we have an owning module for all declarations we see.
// However, those declarations are module-private unless explicitly
// exported.
auto *TU = Context.getTranslationUnitDecl();
TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
TU->setLocalOwningModule(Mod);
// FIXME: Create a ModuleDecl.
return nullptr;
}
DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ImportLoc,
ModuleIdPath Path) {
Module *Mod =
getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
/*IsIncludeDirective=*/false);
if (!Mod)
return true;
VisibleModules.setVisible(Mod, ImportLoc);
checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
// FIXME: we should support importing a submodule within a different submodule
// of the same top-level module. Until we do, make it an error rather than
// silently ignoring the import.
// Import-from-implementation is valid in the Modules TS. FIXME: Should we
// warn on a redundant import of the current module?
if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule &&
(getLangOpts().isCompilingModule() || !getLangOpts().ModulesTS))
Diag(ImportLoc, getLangOpts().isCompilingModule()
? diag::err_module_self_import
: diag::err_module_import_in_implementation)
<< Mod->getFullModuleName() << getLangOpts().CurrentModule;
SmallVector<SourceLocation, 2> IdentifierLocs;
Module *ModCheck = Mod;
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
// If we've run out of module parents, just drop the remaining identifiers.
// We need the length to be consistent.
if (!ModCheck)
break;
ModCheck = ModCheck->Parent;
IdentifierLocs.push_back(Path[I].second);
}
ImportDecl *Import = ImportDecl::Create(Context, CurContext, StartLoc,
Mod, IdentifierLocs);
if (!ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, Import);
CurContext->addDecl(Import);
// Re-export the module if needed.
if (Import->isExported() &&
!ModuleScopes.empty() && ModuleScopes.back().ModuleInterface)
getCurrentModule()->Exports.emplace_back(Mod, false);
return Import;
}
void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
BuildModuleInclude(DirectiveLoc, Mod);
}
void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
// Determine whether we're in the #include buffer for a module. The #includes
// in that buffer do not qualify as module imports; they're just an
// implementation detail of us building the module.
//
// FIXME: Should we even get ActOnModuleInclude calls for those?
bool IsInModuleIncludes =
TUKind == TU_Module &&
getSourceManager().isWrittenInMainFile(DirectiveLoc);
bool ShouldAddImport = !IsInModuleIncludes;
// If this module import was due to an inclusion directive, create an
// implicit import declaration to capture it in the AST.
if (ShouldAddImport) {
TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
DirectiveLoc, Mod,
DirectiveLoc);
if (!ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, ImportD);
TU->addDecl(ImportD);
Consumer.HandleImplicitImportDecl(ImportD);
}
getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc);
VisibleModules.setVisible(Mod, DirectiveLoc);
}
void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
ModuleScopes.push_back({});
ModuleScopes.back().Module = Mod;
if (getLangOpts().ModulesLocalVisibility)
ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules);
VisibleModules.setVisible(Mod, DirectiveLoc);
// The enclosing context is now part of this module.
// FIXME: Consider creating a child DeclContext to hold the entities
// lexically within the module.
if (getLangOpts().trackLocalOwningModule()) {
for (auto *DC = CurContext; DC; DC = DC->getLexicalParent()) {
cast<Decl>(DC)->setModuleOwnershipKind(
getLangOpts().ModulesLocalVisibility
? Decl::ModuleOwnershipKind::VisibleWhenImported
: Decl::ModuleOwnershipKind::Visible);
cast<Decl>(DC)->setLocalOwningModule(Mod);
}
}
}
void Sema::ActOnModuleEnd(SourceLocation EomLoc, Module *Mod) {
if (getLangOpts().ModulesLocalVisibility) {
VisibleModules = std::move(ModuleScopes.back().OuterVisibleModules);
// Leaving a module hides namespace names, so our visible namespace cache
// is now out of date.
VisibleNamespaceCache.clear();
}
assert(!ModuleScopes.empty() && ModuleScopes.back().Module == Mod &&
"left the wrong module scope");
ModuleScopes.pop_back();
// We got to the end of processing a local module. Create an
// ImportDecl as we would for an imported module.
FileID File = getSourceManager().getFileID(EomLoc);
SourceLocation DirectiveLoc;
if (EomLoc == getSourceManager().getLocForEndOfFile(File)) {
// We reached the end of a #included module header. Use the #include loc.
assert(File != getSourceManager().getMainFileID() &&
"end of submodule in main source file");
DirectiveLoc = getSourceManager().getIncludeLoc(File);
} else {
// We reached an EOM pragma. Use the pragma location.
DirectiveLoc = EomLoc;
}
BuildModuleInclude(DirectiveLoc, Mod);
// Any further declarations are in whatever module we returned to.
if (getLangOpts().trackLocalOwningModule()) {
// The parser guarantees that this is the same context that we entered
// the module within.
for (auto *DC = CurContext; DC; DC = DC->getLexicalParent()) {
cast<Decl>(DC)->setLocalOwningModule(getCurrentModule());
if (!getCurrentModule())
cast<Decl>(DC)->setModuleOwnershipKind(
Decl::ModuleOwnershipKind::Unowned);
}
}
}
void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod) {
// Bail if we're not allowed to implicitly import a module here.
if (isSFINAEContext() || !getLangOpts().ModulesErrorRecovery ||
VisibleModules.isVisible(Mod))
return;
// Create the implicit import declaration.
TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
Loc, Mod, Loc);
TU->addDecl(ImportD);
Consumer.HandleImplicitImportDecl(ImportD);
// Make the module visible.
getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, Loc);
VisibleModules.setVisible(Mod, Loc);
}
/// We have parsed the start of an export declaration, including the '{'
/// (if present).
Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc) {
ExportDecl *D = ExportDecl::Create(Context, CurContext, ExportLoc);
// C++ Modules TS draft:
// An export-declaration shall appear in the purview of a module other than
// the global module.
if (ModuleScopes.empty() || !ModuleScopes.back().ModuleInterface)
Diag(ExportLoc, diag::err_export_not_in_module_interface);
// An export-declaration [...] shall not contain more than one
// export keyword.
//
// The intent here is that an export-declaration cannot appear within another
// export-declaration.
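// For example (illustrative): 'export { export int n; }' is rejected here.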
if (D->isExported())
Diag(ExportLoc, diag::err_export_within_export);
CurContext->addDecl(D);
PushDeclContext(S, D);
D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::VisibleWhenImported);
return D;
}
/// Complete the definition of an export declaration.
Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
auto *ED = cast<ExportDecl>(D);
if (RBraceLoc.isValid())
ED->setRBraceLoc(RBraceLoc);
// FIXME: Diagnose export of internal-linkage declaration (including
// anonymous namespace).
PopDeclContext();
return D;
}
void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation NameLoc,
SourceLocation AliasNameLoc) {
NamedDecl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
LookupOrdinaryName);
AsmLabelAttr *Attr =
AsmLabelAttr::CreateImplicit(Context, AliasName->getName(), AliasNameLoc);
// If a declaration that:
// 1) declares a function or a variable
// 2) has external linkage
// already exists, add a label attribute to it.
if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
if (isDeclExternC(PrevDecl))
PrevDecl->addAttr(Attr);
else
Diag(PrevDecl->getLocation(), diag::warn_redefine_extname_not_applied)
<< /*Variable*/(isa<FunctionDecl>(PrevDecl) ? 0 : 1) << PrevDecl;
// Otherwise, add a label attribute to ExtnameUndeclaredIdentifiers.
} else
(void)ExtnameUndeclaredIdentifiers.insert(std::make_pair(Name, Attr));
}
void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
SourceLocation PragmaLoc,
SourceLocation NameLoc) {
Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName);
if (PrevDecl) {
PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc));
} else {
(void)WeakUndeclaredIdentifiers.insert(
std::pair<IdentifierInfo*,WeakInfo>
(Name, WeakInfo((IdentifierInfo*)nullptr, NameLoc)));
}
}
void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation NameLoc,
SourceLocation AliasNameLoc) {
Decl *PrevDecl = LookupSingleName(TUScope, AliasName, AliasNameLoc,
LookupOrdinaryName);
WeakInfo W = WeakInfo(Name, NameLoc);
if (PrevDecl && (isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl))) {
if (!PrevDecl->hasAttr<AliasAttr>())
if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl))
DeclApplyPragmaWeak(TUScope, ND, W);
} else {
(void)WeakUndeclaredIdentifiers.insert(
std::pair<IdentifierInfo*,WeakInfo>(AliasName, W));
}
}
Decl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
Index: projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp (revision 340125)
@@ -1,13819 +1,13820 @@
//===--- SemaOverload.cpp - C++ Overloading -------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides Sema routines for C++ overloading.
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/Overload.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include <algorithm>
#include <cstdlib>
using namespace clang;
using namespace sema;
static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
return llvm::any_of(FD->parameters(), [](const ParmVarDecl *P) {
return P->hasAttr<PassObjectSizeAttr>();
});
}
/// A convenience routine for creating a decayed reference to a function.
static ExprResult
CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
const Expr *Base, bool HadMultipleCandidates,
SourceLocation Loc = SourceLocation(),
const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
if (S.DiagnoseUseOfDecl(FoundDecl, Loc))
return ExprError();
// If FoundDecl is different from Fn (such as if one is a template
// and the other a specialization), make sure DiagnoseUseOfDecl is
// called on both.
// FIXME: This would be more comprehensively addressed by modifying
// DiagnoseUseOfDecl to accept both the FoundDecl and the decl
// being used.
if (FoundDecl != Fn && S.DiagnoseUseOfDecl(Fn, Loc))
return ExprError();
if (auto *FPT = Fn->getType()->getAs<FunctionProtoType>())
S.ResolveExceptionSpec(Loc, FPT);
DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, false, Fn->getType(),
VK_LValue, Loc, LocInfo);
if (HadMultipleCandidates)
DRE->setHadMultipleCandidates(true);
S.MarkDeclRefReferenced(DRE, Base);
return S.ImpCastExprToType(DRE, S.Context.getPointerType(DRE->getType()),
CK_FunctionToPointerDecay);
}
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle,
bool AllowObjCWritebackConversion);
static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle);
static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
UserDefinedConversionSequence& User,
OverloadCandidateSet& Conversions,
bool AllowExplicit,
bool AllowObjCConversionOnExplicit);
static ImplicitConversionSequence::CompareKind
CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2);
static ImplicitConversionSequence::CompareKind
CompareQualificationConversions(Sema &S,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2);
static ImplicitConversionSequence::CompareKind
CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2);
/// GetConversionRank - Retrieve the implicit conversion rank
/// corresponding to the given implicit conversion kind.
ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
static const ImplicitConversionRank
Rank[(int)ICK_Num_Conversion_Kinds] = {
ICR_Exact_Match,
ICR_Exact_Match,
ICR_Exact_Match,
ICR_Exact_Match,
ICR_Exact_Match,
ICR_Exact_Match,
ICR_Promotion,
ICR_Promotion,
ICR_Promotion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_OCL_Scalar_Widening,
ICR_Complex_Real_Conversion,
ICR_Conversion,
ICR_Conversion,
ICR_Writeback_Conversion,
ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
// it was omitted by the patch that added
// ICK_Zero_Event_Conversion
ICR_C_Conversion,
ICR_C_Conversion_Extension
};
return Rank[(int)Kind];
}
/// GetImplicitConversionName - Return the name of this kind of
/// implicit conversion.
static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
static const char* const Name[(int)ICK_Num_Conversion_Kinds] = {
"No conversion",
"Lvalue-to-rvalue",
"Array-to-pointer",
"Function-to-pointer",
"Function pointer conversion",
"Qualification",
"Integral promotion",
"Floating point promotion",
"Complex promotion",
"Integral conversion",
"Floating conversion",
"Complex conversion",
"Floating-integral conversion",
"Pointer conversion",
"Pointer-to-member conversion",
"Boolean conversion",
"Compatible-types conversion",
"Derived-to-base conversion",
"Vector conversion",
"Vector splat",
"Complex-real conversion",
"Block Pointer conversion",
"Transparent Union Conversion",
"Writeback conversion",
"OpenCL Zero Event Conversion",
"C specific type conversion",
"Incompatible pointer conversion"
};
return Name[Kind];
}
/// StandardConversionSequence - Set the standard conversion
/// sequence to the identity conversion.
void StandardConversionSequence::setAsIdentityConversion() {
First = ICK_Identity;
Second = ICK_Identity;
Third = ICK_Identity;
DeprecatedStringLiteralToCharPtr = false;
QualificationIncludesObjCLifetime = false;
ReferenceBinding = false;
DirectBinding = false;
IsLvalueReference = true;
BindsToFunctionLvalue = false;
BindsToRvalue = false;
BindsImplicitObjectArgumentWithoutRefQualifier = false;
ObjCLifetimeConversionBinding = false;
CopyConstructor = nullptr;
}
/// getRank - Retrieve the rank of this standard conversion sequence
/// (C++ 13.3.3.1.1p3). The rank is the largest rank of each of the
/// implicit conversions.
ImplicitConversionRank StandardConversionSequence::getRank() const {
ImplicitConversionRank Rank = ICR_Exact_Match;
if (GetConversionRank(First) > Rank)
Rank = GetConversionRank(First);
if (GetConversionRank(Second) > Rank)
Rank = GetConversionRank(Second);
if (GetConversionRank(Third) > Rank)
Rank = GetConversionRank(Third);
return Rank;
}
/// isPointerConversionToBool - Determines whether this conversion is
/// a conversion of a pointer or pointer-to-member to bool. This is
/// used as part of the ranking of standard conversion sequences
/// (C++ 13.3.3.2p4).
bool StandardConversionSequence::isPointerConversionToBool() const {
// Note that FromType has not necessarily been transformed by the
// array-to-pointer or function-to-pointer implicit conversions, so
// check for their presence as well as checking whether FromType is
// a pointer.
if (getToType(1)->isBooleanType() &&
(getFromType()->isPointerType() ||
getFromType()->isMemberPointerType() ||
getFromType()->isObjCObjectPointerType() ||
getFromType()->isBlockPointerType() ||
getFromType()->isNullPtrType() ||
First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
return true;
return false;
}
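// A minimal sketch of the ranking effect: per C++ 13.3.3.2p4, a conversion
// of a pointer to bool is worse than another pointer conversion, so:
//
//   void f(bool);
//   void f(const void *);
//   int *p = nullptr;
//   f(p);   // calls f(const void*); int* -> bool loses the tie-break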
/// isPointerConversionToVoidPointer - Determines whether this
/// conversion is a conversion of a pointer to a void pointer. This is
/// used as part of the ranking of standard conversion sequences (C++
/// 13.3.3.2p4).
bool
StandardConversionSequence::
isPointerConversionToVoidPointer(ASTContext& Context) const {
QualType FromType = getFromType();
QualType ToType = getToType(1);
// Note that FromType has not necessarily been transformed by the
// array-to-pointer implicit conversion, so check for its presence
// and redo the conversion to get a pointer.
if (First == ICK_Array_To_Pointer)
FromType = Context.getArrayDecayedType(FromType);
if (Second == ICK_Pointer_Conversion && FromType->isAnyPointerType())
if (const PointerType* ToPtrType = ToType->getAs<PointerType>())
return ToPtrType->getPointeeType()->isVoidType();
return false;
}
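// A minimal sketch of the ranking effect: per C++ 13.3.3.2p4, a conversion
// to a pointer to base is better than a conversion to void*, so:
//
//   struct B {};
//   struct D : B {};
//   void g(B *);
//   void g(void *);
//   D *p = nullptr;
//   g(p);   // calls g(B*); D* -> void* is the worse conversion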
/// Skip any implicit casts which could be either part of a narrowing conversion
/// or after one in an implicit conversion.
static const Expr *IgnoreNarrowingConversion(const Expr *Converted) {
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Converted)) {
switch (ICE->getCastKind()) {
case CK_NoOp:
case CK_IntegralCast:
case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_BooleanToSignedIntegral:
case CK_FloatingToIntegral:
case CK_FloatingToBoolean:
case CK_FloatingCast:
Converted = ICE->getSubExpr();
continue;
default:
return Converted;
}
}
return Converted;
}
/// Check if this standard conversion sequence represents a narrowing
/// conversion, according to C++11 [dcl.init.list]p7.
///
/// \param Ctx The AST context.
/// \param Converted The result of applying this standard conversion sequence.
/// \param ConstantValue If this is an NK_Constant_Narrowing conversion, the
/// value of the expression prior to the narrowing conversion.
/// \param ConstantType If this is an NK_Constant_Narrowing conversion, the
/// type of the expression prior to the narrowing conversion.
/// \param IgnoreFloatToIntegralConversion If true, type-narrowing conversions
/// from floating-point types to integral types should be ignored.
NarrowingKind StandardConversionSequence::getNarrowingKind(
ASTContext &Ctx, const Expr *Converted, APValue &ConstantValue,
QualType &ConstantType, bool IgnoreFloatToIntegralConversion) const {
assert(Ctx.getLangOpts().CPlusPlus && "narrowing check outside C++");
// C++11 [dcl.init.list]p7:
// A narrowing conversion is an implicit conversion ...
QualType FromType = getToType(0);
QualType ToType = getToType(1);
// A conversion to an enumeration type is narrowing if the conversion to
// the underlying type is narrowing. This only arises for expressions of
// the form 'Enum{init}'.
if (auto *ET = ToType->getAs<EnumType>())
ToType = ET->getDecl()->getIntegerType();
switch (Second) {
// 'bool' is an integral type; dispatch to the right place to handle it.
case ICK_Boolean_Conversion:
if (FromType->isRealFloatingType())
goto FloatingIntegralConversion;
if (FromType->isIntegralOrUnscopedEnumerationType())
goto IntegralConversion;
// Boolean conversions can be from pointers and pointers to members
// [conv.bool], and those aren't considered narrowing conversions.
return NK_Not_Narrowing;
// -- from a floating-point type to an integer type, or
//
// -- from an integer type or unscoped enumeration type to a floating-point
// type, except where the source is a constant expression and the actual
// value after conversion will fit into the target type and will produce
// the original value when converted back to the original type, or
case ICK_Floating_Integral:
FloatingIntegralConversion:
if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) {
return NK_Type_Narrowing;
} else if (FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isRealFloatingType()) {
if (IgnoreFloatToIntegralConversion)
return NK_Not_Narrowing;
llvm::APSInt IntConstantValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
assert(Initializer && "Unknown conversion expression");
// If it's value-dependent, we can't tell whether it's narrowing.
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
if (Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
// Convert the integer to the floating type.
llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
llvm::APFloat::rmNearestTiesToEven);
// And back.
llvm::APSInt ConvertedValue = IntConstantValue;
bool ignored;
Result.convertToInteger(ConvertedValue,
llvm::APFloat::rmTowardZero, &ignored);
// If the resulting value is different, this was a narrowing conversion.
if (IntConstantValue != ConvertedValue) {
ConstantValue = APValue(IntConstantValue);
ConstantType = Initializer->getType();
return NK_Constant_Narrowing;
}
} else {
// Variables are always narrowings.
return NK_Variable_Narrowing;
}
}
return NK_Not_Narrowing;
// -- from long double to double or float, or from double to float, except
// where the source is a constant expression and the actual value after
// conversion is within the range of values that can be represented (even
// if it cannot be represented exactly), or
case ICK_Floating_Conversion:
if (FromType->isRealFloatingType() && ToType->isRealFloatingType() &&
Ctx.getFloatingTypeOrder(FromType, ToType) == 1) {
// FromType is larger than ToType.
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
// If it's value-dependent, we can't tell whether it's narrowing.
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
if (Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) {
// Constant!
assert(ConstantValue.isFloat());
llvm::APFloat FloatVal = ConstantValue.getFloat();
// Convert the source value into the target type.
bool ignored;
llvm::APFloat::opStatus ConvertStatus = FloatVal.convert(
Ctx.getFloatTypeSemantics(ToType),
llvm::APFloat::rmNearestTiesToEven, &ignored);
// If there was no overflow, the source value is within the range of
// values that can be represented.
if (ConvertStatus & llvm::APFloat::opOverflow) {
ConstantType = Initializer->getType();
return NK_Constant_Narrowing;
}
} else {
return NK_Variable_Narrowing;
}
}
return NK_Not_Narrowing;
// -- from an integer type or unscoped enumeration type to an integer type
// that cannot represent all the values of the original type, except where
// the source is a constant expression and the actual value after
// conversion will fit into the target type and will produce the original
// value when converted back to the original type.
case ICK_Integral_Conversion:
IntegralConversion: {
assert(FromType->isIntegralOrUnscopedEnumerationType());
assert(ToType->isIntegralOrUnscopedEnumerationType());
const bool FromSigned = FromType->isSignedIntegerOrEnumerationType();
const unsigned FromWidth = Ctx.getIntWidth(FromType);
const bool ToSigned = ToType->isSignedIntegerOrEnumerationType();
const unsigned ToWidth = Ctx.getIntWidth(ToType);
if (FromWidth > ToWidth ||
(FromWidth == ToWidth && FromSigned != ToSigned) ||
(FromSigned && !ToSigned)) {
// Not all values of FromType can be represented in ToType.
llvm::APSInt InitializerValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
// If it's value-dependent, we can't tell whether it's narrowing.
if (Initializer->isValueDependent())
return NK_Dependent_Narrowing;
if (!Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
// Such conversions on variables are always narrowing.
return NK_Variable_Narrowing;
}
bool Narrowing = false;
if (FromWidth < ToWidth) {
// Negative -> unsigned is narrowing. Otherwise, more bits is never
// narrowing.
if (InitializerValue.isSigned() && InitializerValue.isNegative())
Narrowing = true;
} else {
// Add a bit to the InitializerValue so we don't have to worry about
// signed vs. unsigned comparisons.
InitializerValue = InitializerValue.extend(
InitializerValue.getBitWidth() + 1);
// Convert the initializer to and from the target width and signed-ness.
llvm::APSInt ConvertedValue = InitializerValue;
ConvertedValue = ConvertedValue.trunc(ToWidth);
ConvertedValue.setIsSigned(ToSigned);
ConvertedValue = ConvertedValue.extend(InitializerValue.getBitWidth());
ConvertedValue.setIsSigned(InitializerValue.isSigned());
// If the result is different, this was a narrowing conversion.
if (ConvertedValue != InitializerValue)
Narrowing = true;
}
if (Narrowing) {
ConstantType = Initializer->getType();
ConstantValue = APValue(InitializerValue);
return NK_Constant_Narrowing;
}
}
return NK_Not_Narrowing;
}
default:
// Other kinds of conversions are not narrowings.
return NK_Not_Narrowing;
}
}
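// A minimal sketch of the NarrowingKind results computed above, using C++11
// list-initialization:
//
//   int    a{3.14};   // NK_Type_Narrowing: floating -> integral always narrows
//   char   b{1000};   // NK_Constant_Narrowing: the constant does not fit
//   double d = 0.1;
//   float  f{d};      // NK_Variable_Narrowing: the source is not constant
//   char   c{65};     // NK_Not_Narrowing: the constant fits and round-trips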
/// dump - Print this standard conversion sequence to standard
/// error. Useful for debugging overloading issues.
LLVM_DUMP_METHOD void StandardConversionSequence::dump() const {
raw_ostream &OS = llvm::errs();
bool PrintedSomething = false;
if (First != ICK_Identity) {
OS << GetImplicitConversionName(First);
PrintedSomething = true;
}
if (Second != ICK_Identity) {
if (PrintedSomething) {
OS << " -> ";
}
OS << GetImplicitConversionName(Second);
if (CopyConstructor) {
OS << " (by copy constructor)";
} else if (DirectBinding) {
OS << " (direct reference binding)";
} else if (ReferenceBinding) {
OS << " (reference binding)";
}
PrintedSomething = true;
}
if (Third != ICK_Identity) {
if (PrintedSomething) {
OS << " -> ";
}
OS << GetImplicitConversionName(Third);
PrintedSomething = true;
}
if (!PrintedSomething) {
OS << "No conversions required";
}
}
/// dump - Print this user-defined conversion sequence to standard
/// error. Useful for debugging overloading issues.
void UserDefinedConversionSequence::dump() const {
raw_ostream &OS = llvm::errs();
if (Before.First || Before.Second || Before.Third) {
Before.dump();
OS << " -> ";
}
if (ConversionFunction)
OS << '\'' << *ConversionFunction << '\'';
else
OS << "aggregate initialization";
if (After.First || After.Second || After.Third) {
OS << " -> ";
After.dump();
}
}
/// dump - Print this implicit conversion sequence to standard
/// error. Useful for debugging overloading issues.
void ImplicitConversionSequence::dump() const {
raw_ostream &OS = llvm::errs();
if (isStdInitializerListElement())
OS << "Worst std::initializer_list element conversion: ";
switch (ConversionKind) {
case StandardConversion:
OS << "Standard conversion: ";
Standard.dump();
break;
case UserDefinedConversion:
OS << "User-defined conversion: ";
UserDefined.dump();
break;
case EllipsisConversion:
OS << "Ellipsis conversion";
break;
case AmbiguousConversion:
OS << "Ambiguous conversion";
break;
case BadConversion:
OS << "Bad conversion";
break;
}
OS << "\n";
}
void AmbiguousConversionSequence::construct() {
new (&conversions()) ConversionSet();
}
void AmbiguousConversionSequence::destruct() {
conversions().~ConversionSet();
}
void
AmbiguousConversionSequence::copyFrom(const AmbiguousConversionSequence &O) {
FromTypePtr = O.FromTypePtr;
ToTypePtr = O.ToTypePtr;
new (&conversions()) ConversionSet(O.conversions());
}
namespace {
// Structure used by DeductionFailureInfo to store
// template argument information.
struct DFIArguments {
TemplateArgument FirstArg;
TemplateArgument SecondArg;
};
// Structure used by DeductionFailureInfo to store
// template parameter and template argument information.
struct DFIParamWithArguments : DFIArguments {
TemplateParameter Param;
};
// Structure used by DeductionFailureInfo to store template argument
// information and the index of the problematic call argument.
struct DFIDeducedMismatchArgs : DFIArguments {
TemplateArgumentList *TemplateArgs;
unsigned CallArgIndex;
};
}
/// Convert from Sema's representation of template deduction information
/// to the form used in overload-candidate information.
DeductionFailureInfo
clang::MakeDeductionFailureInfo(ASTContext &Context,
Sema::TemplateDeductionResult TDK,
TemplateDeductionInfo &Info) {
DeductionFailureInfo Result;
Result.Result = static_cast<unsigned>(TDK);
Result.HasDiagnostic = false;
switch (TDK) {
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_MiscellaneousDeductionFailure:
case Sema::TDK_CUDATargetMismatch:
Result.Data = nullptr;
break;
case Sema::TDK_Incomplete:
case Sema::TDK_InvalidExplicitArguments:
Result.Data = Info.Param.getOpaqueValue();
break;
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested: {
// FIXME: Should allocate from normal heap so that we can free this later.
auto *Saved = new (Context) DFIDeducedMismatchArgs;
Saved->FirstArg = Info.FirstArg;
Saved->SecondArg = Info.SecondArg;
Saved->TemplateArgs = Info.take();
Saved->CallArgIndex = Info.CallArgIndex;
Result.Data = Saved;
break;
}
case Sema::TDK_NonDeducedMismatch: {
// FIXME: Should allocate from normal heap so that we can free this later.
DFIArguments *Saved = new (Context) DFIArguments;
Saved->FirstArg = Info.FirstArg;
Saved->SecondArg = Info.SecondArg;
Result.Data = Saved;
break;
}
case Sema::TDK_IncompletePack:
// FIXME: It's slightly wasteful to allocate two TemplateArguments for this.
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified: {
// FIXME: Should allocate from normal heap so that we can free this later.
DFIParamWithArguments *Saved = new (Context) DFIParamWithArguments;
Saved->Param = Info.Param;
Saved->FirstArg = Info.FirstArg;
Saved->SecondArg = Info.SecondArg;
Result.Data = Saved;
break;
}
case Sema::TDK_SubstitutionFailure:
Result.Data = Info.take();
if (Info.hasSFINAEDiagnostic()) {
PartialDiagnosticAt *Diag = new (Result.Diagnostic) PartialDiagnosticAt(
SourceLocation(), PartialDiagnostic::NullDiagnostic());
Info.takeSFINAEDiagnostic(*Diag);
Result.HasDiagnostic = true;
}
break;
case Sema::TDK_Success:
case Sema::TDK_NonDependentConversionFailure:
llvm_unreachable("not a deduction failure");
}
return Result;
}
void DeductionFailureInfo::Destroy() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_Incomplete:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_CUDATargetMismatch:
case Sema::TDK_NonDependentConversionFailure:
break;
case Sema::TDK_IncompletePack:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
case Sema::TDK_NonDeducedMismatch:
// FIXME: Destroy the data?
Data = nullptr;
break;
case Sema::TDK_SubstitutionFailure:
// FIXME: Destroy the template argument list?
Data = nullptr;
if (PartialDiagnosticAt *Diag = getSFINAEDiagnostic()) {
Diag->~PartialDiagnosticAt();
HasDiagnostic = false;
}
break;
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
break;
}
}
PartialDiagnosticAt *DeductionFailureInfo::getSFINAEDiagnostic() {
if (HasDiagnostic)
return static_cast<PartialDiagnosticAt*>(static_cast<void*>(Diagnostic));
return nullptr;
}
TemplateParameter DeductionFailureInfo::getTemplateParameter() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_SubstitutionFailure:
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_CUDATargetMismatch:
case Sema::TDK_NonDependentConversionFailure:
return TemplateParameter();
case Sema::TDK_Incomplete:
case Sema::TDK_InvalidExplicitArguments:
return TemplateParameter::getFromOpaqueValue(Data);
case Sema::TDK_IncompletePack:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
return static_cast<DFIParamWithArguments*>(Data)->Param;
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
break;
}
return TemplateParameter();
}
TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_Incomplete:
case Sema::TDK_IncompletePack:
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_CUDATargetMismatch:
case Sema::TDK_NonDependentConversionFailure:
return nullptr;
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
return static_cast<DFIDeducedMismatchArgs*>(Data)->TemplateArgs;
case Sema::TDK_SubstitutionFailure:
return static_cast<TemplateArgumentList*>(Data);
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
break;
}
return nullptr;
}
const TemplateArgument *DeductionFailureInfo::getFirstArg() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_Incomplete:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_SubstitutionFailure:
case Sema::TDK_CUDATargetMismatch:
case Sema::TDK_NonDependentConversionFailure:
return nullptr;
case Sema::TDK_IncompletePack:
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
case Sema::TDK_NonDeducedMismatch:
return &static_cast<DFIArguments*>(Data)->FirstArg;
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
break;
}
return nullptr;
}
const TemplateArgument *DeductionFailureInfo::getSecondArg() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_Success:
case Sema::TDK_Invalid:
case Sema::TDK_InstantiationDepth:
case Sema::TDK_Incomplete:
case Sema::TDK_IncompletePack:
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_SubstitutionFailure:
case Sema::TDK_CUDATargetMismatch:
case Sema::TDK_NonDependentConversionFailure:
return nullptr;
case Sema::TDK_Inconsistent:
case Sema::TDK_Underqualified:
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
case Sema::TDK_NonDeducedMismatch:
return &static_cast<DFIArguments*>(Data)->SecondArg;
// Unhandled
case Sema::TDK_MiscellaneousDeductionFailure:
break;
}
return nullptr;
}
llvm::Optional<unsigned> DeductionFailureInfo::getCallArgIndex() {
switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
return static_cast<DFIDeducedMismatchArgs*>(Data)->CallArgIndex;
default:
return llvm::None;
}
}
void OverloadCandidateSet::destroyCandidates() {
for (iterator i = begin(), e = end(); i != e; ++i) {
for (auto &C : i->Conversions)
C.~ImplicitConversionSequence();
if (!i->Viable && i->FailureKind == ovl_fail_bad_deduction)
i->DeductionFailure.Destroy();
}
}
void OverloadCandidateSet::clear(CandidateSetKind CSK) {
destroyCandidates();
SlabAllocator.Reset();
NumInlineBytesUsed = 0;
Candidates.clear();
Functions.clear();
Kind = CSK;
}
namespace {
class UnbridgedCastsSet {
struct Entry {
Expr **Addr;
Expr *Saved;
};
SmallVector<Entry, 2> Entries;
public:
void save(Sema &S, Expr *&E) {
assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
Entry entry = { &E, E };
Entries.push_back(entry);
E = S.stripARCUnbridgedCast(E);
}
void restore() {
for (SmallVectorImpl<Entry>::iterator
i = Entries.begin(), e = Entries.end(); i != e; ++i)
*i->Addr = i->Saved;
}
};
}
/// checkPlaceholderForOverload - Do any interesting placeholder-like
/// preprocessing on the given expression.
///
/// \param unbridgedCasts a collection to which to add unbridged casts;
/// without this, they will be immediately diagnosed as errors
///
/// Return true on unrecoverable error.
static bool
checkPlaceholderForOverload(Sema &S, Expr *&E,
UnbridgedCastsSet *unbridgedCasts = nullptr) {
if (const BuiltinType *placeholder = E->getType()->getAsPlaceholderType()) {
// We can't handle overloaded expressions here because overload
// resolution might reasonably tweak them.
if (placeholder->getKind() == BuiltinType::Overload) return false;
// If the context potentially accepts unbridged ARC casts, strip
// the unbridged cast and add it to the collection for later restoration.
if (placeholder->getKind() == BuiltinType::ARCUnbridgedCast &&
unbridgedCasts) {
unbridgedCasts->save(S, E);
return false;
}
// Go ahead and check everything else.
ExprResult result = S.CheckPlaceholderExpr(E);
if (result.isInvalid())
return true;
E = result.get();
return false;
}
// Nothing to do.
return false;
}
/// checkArgPlaceholdersForOverload - Check a set of call operands for
/// placeholders.
static bool checkArgPlaceholdersForOverload(Sema &S,
MultiExprArg Args,
UnbridgedCastsSet &unbridged) {
for (unsigned i = 0, e = Args.size(); i != e; ++i)
if (checkPlaceholderForOverload(S, Args[i], &unbridged))
return true;
return false;
}
/// Determine whether the given New declaration is an overload of the
/// declarations in Old. This routine returns Ovl_Match or Ovl_NonFunction if
/// New and Old cannot be overloaded, e.g., if New has the same signature as
/// some function in Old (C++ 1.3.10) or if the Old declarations aren't
/// functions (or function templates) at all. When it does return Ovl_Match or
/// Ovl_NonFunction, MatchedDecl will point to the decl that New cannot be
/// overloaded with. This decl may be a UsingShadowDecl on top of the underlying
/// declaration.
///
/// Example: Given the following input:
///
/// void f(int, float); // #1
/// void f(int, int); // #2
/// int f(int, int); // #3
///
/// When we process #1, there is no previous declaration of "f", so IsOverload
/// will not be used.
///
/// When we process #2, Old contains only the FunctionDecl for #1. By comparing
/// the parameter types, we see that #1 and #2 are overloaded (since they have
/// different signatures), so this routine returns Ovl_Overload; MatchedDecl is
/// unchanged.
///
/// When we process #3, Old is an overload set containing #1 and #2. We compare
/// the signatures of #3 to #1 (they're overloaded, so we do nothing) and then
/// #3 to #2. Since the signatures of #3 and #2 are identical (return types of
/// functions are not part of the signature), IsOverload returns Ovl_Match and
/// MatchedDecl will be set to point to the FunctionDecl for #2.
///
/// 'NewIsUsingDecl' indicates that 'New' is being introduced into a class
/// by a using declaration. The rules for whether to hide shadow declarations
/// ignore some properties which otherwise figure into a function template's
/// signature.
Sema::OverloadKind
Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
NamedDecl *&Match, bool NewIsUsingDecl) {
for (LookupResult::iterator I = Old.begin(), E = Old.end();
I != E; ++I) {
NamedDecl *OldD = *I;
bool OldIsUsingDecl = false;
if (isa<UsingShadowDecl>(OldD)) {
OldIsUsingDecl = true;
// We can always introduce two using declarations into the same
// context, even if they have identical signatures.
if (NewIsUsingDecl) continue;
OldD = cast<UsingShadowDecl>(OldD)->getTargetDecl();
}
// A using-declaration does not conflict with another declaration
// if one of them is hidden.
if ((OldIsUsingDecl || NewIsUsingDecl) && !isVisible(*I))
continue;
// If either declaration was introduced by a using declaration,
// we'll need to use slightly different rules for matching.
// Essentially, these rules are the normal rules, except that
// function templates hide function templates with different
// return types or template parameter lists.
bool UseMemberUsingDeclRules =
(OldIsUsingDecl || NewIsUsingDecl) && CurContext->isRecord() &&
!New->getFriendObjectKind();
if (FunctionDecl *OldF = OldD->getAsFunction()) {
if (!IsOverload(New, OldF, UseMemberUsingDeclRules)) {
if (UseMemberUsingDeclRules && OldIsUsingDecl) {
HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
continue;
}
if (!isa<FunctionTemplateDecl>(OldD) &&
!shouldLinkPossiblyHiddenDecl(*I, New))
continue;
Match = *I;
return Ovl_Match;
}
// Builtins that have custom typechecking or have a reference should
// not be overloadable or redeclarable.
if (!getASTContext().canBuiltinBeRedeclared(OldF)) {
Match = *I;
return Ovl_NonFunction;
}
} else if (isa<UsingDecl>(OldD) || isa<UsingPackDecl>(OldD)) {
// We can overload with these, which can show up when doing
// redeclaration checks for UsingDecls.
assert(Old.getLookupKind() == LookupUsingDeclName);
} else if (isa<TagDecl>(OldD)) {
// We can always overload with tags by hiding them.
} else if (auto *UUD = dyn_cast<UnresolvedUsingValueDecl>(OldD)) {
// Optimistically assume that an unresolved using decl will
// overload; if it doesn't, we'll have to diagnose during
// template instantiation.
//
// Exception: if the scope is dependent and this is not a class
// member, the using declaration can only introduce an enumerator.
if (UUD->getQualifier()->isDependent() && !UUD->isCXXClassMember()) {
Match = *I;
return Ovl_NonFunction;
}
} else {
// (C++ 13p1):
// Only function declarations can be overloaded; object and type
// declarations cannot be overloaded.
Match = *I;
return Ovl_NonFunction;
}
}
return Ovl_Overload;
}
bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
bool UseMemberUsingDeclRules, bool ConsiderCudaAttrs) {
// C++ [basic.start.main]p2: This function shall not be overloaded.
if (New->isMain())
return false;
// MSVCRT user defined entry points cannot be overloaded.
if (New->isMSVCRTEntryPoint())
return false;
FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate();
FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate();
// C++ [temp.fct]p2:
// A function template can be overloaded with other function templates
// and with normal (non-template) functions.
if ((OldTemplate == nullptr) != (NewTemplate == nullptr))
return true;
// Is the function New an overload of the function Old?
QualType OldQType = Context.getCanonicalType(Old->getType());
QualType NewQType = Context.getCanonicalType(New->getType());
// Compare the signatures (C++ 1.3.10) of the two functions to
// determine whether they are overloads. If we find any mismatch
// in the signature, they are overloads.
// If either of these functions is a K&R-style function (no
// prototype), then we consider them to have matching signatures.
if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) ||
isa<FunctionNoProtoType>(NewQType.getTypePtr()))
return false;
const FunctionProtoType *OldType = cast<FunctionProtoType>(OldQType);
const FunctionProtoType *NewType = cast<FunctionProtoType>(NewQType);
// The signature of a function includes the types of its
// parameters (C++ 1.3.10), which includes the presence or absence
// of the ellipsis (see C++ DR 357).
if (OldQType != NewQType &&
(OldType->getNumParams() != NewType->getNumParams() ||
OldType->isVariadic() != NewType->isVariadic() ||
!FunctionParamTypesAreEqual(OldType, NewType)))
return true;
// C++ [temp.over.link]p4:
// The signature of a function template consists of its function
// signature, its return type and its template parameter list. The names
// of the template parameters are significant only for establishing the
// relationship between the template parameters and the rest of the
// signature.
//
// We check the return type and template parameter lists for function
// templates first; the remaining checks follow.
//
// However, we don't consider either of these when deciding whether
// a member introduced by a shadow declaration is hidden.
if (!UseMemberUsingDeclRules && NewTemplate &&
(!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
OldTemplate->getTemplateParameters(),
false, TPL_TemplateMatch) ||
- OldType->getReturnType() != NewType->getReturnType()))
+ !Context.hasSameType(Old->getDeclaredReturnType(),
+ New->getDeclaredReturnType())))
return true;
// If the function is a class member, its signature includes the
// cv-qualifiers (if any) and ref-qualifier (if any) on the function itself.
//
// As part of this, also check whether one of the member functions
// is static, in which case they are not overloads (C++
// 13.1p2). While not part of the definition of the signature,
// this check is important to determine whether these functions
// can be overloaded.
CXXMethodDecl *OldMethod = dyn_cast<CXXMethodDecl>(Old);
CXXMethodDecl *NewMethod = dyn_cast<CXXMethodDecl>(New);
if (OldMethod && NewMethod &&
!OldMethod->isStatic() && !NewMethod->isStatic()) {
if (OldMethod->getRefQualifier() != NewMethod->getRefQualifier()) {
if (!UseMemberUsingDeclRules &&
(OldMethod->getRefQualifier() == RQ_None ||
NewMethod->getRefQualifier() == RQ_None)) {
// C++0x [over.load]p2:
// - Member function declarations with the same name and the same
// parameter-type-list as well as member function template
// declarations with the same name, the same parameter-type-list, and
// the same template parameter lists cannot be overloaded if any of
// them, but not all, have a ref-qualifier (8.3.5).
Diag(NewMethod->getLocation(), diag::err_ref_qualifier_overload)
<< NewMethod->getRefQualifier() << OldMethod->getRefQualifier();
Diag(OldMethod->getLocation(), diag::note_previous_declaration);
}
return true;
}
// We may not have applied the implicit const for a constexpr member
// function yet (because we haven't yet resolved whether this is a static
// or non-static member function). Add it now, on the assumption that this
// is a redeclaration of OldMethod.
unsigned OldQuals = OldMethod->getTypeQualifiers();
unsigned NewQuals = NewMethod->getTypeQualifiers();
if (!getLangOpts().CPlusPlus14 && NewMethod->isConstexpr() &&
!isa<CXXConstructorDecl>(NewMethod))
NewQuals |= Qualifiers::Const;
// We do not allow overloading based off of '__restrict'.
OldQuals &= ~Qualifiers::Restrict;
NewQuals &= ~Qualifiers::Restrict;
if (OldQuals != NewQuals)
return true;
}
// Though pass_object_size is placed on parameters and takes an argument, we
// consider it to be a function-level modifier for the sake of function
// identity. Either the function has one or more parameters with
// pass_object_size or it doesn't.
if (functionHasPassObjectSizeParams(New) !=
functionHasPassObjectSizeParams(Old))
return true;
// enable_if attributes are an order-sensitive part of the signature.
for (specific_attr_iterator<EnableIfAttr>
NewI = New->specific_attr_begin<EnableIfAttr>(),
NewE = New->specific_attr_end<EnableIfAttr>(),
OldI = Old->specific_attr_begin<EnableIfAttr>(),
OldE = Old->specific_attr_end<EnableIfAttr>();
NewI != NewE || OldI != OldE; ++NewI, ++OldI) {
if (NewI == NewE || OldI == OldE)
return true;
llvm::FoldingSetNodeID NewID, OldID;
NewI->getCond()->Profile(NewID, Context, true);
OldI->getCond()->Profile(OldID, Context, true);
if (NewID != OldID)
return true;
}
if (getLangOpts().CUDA && ConsiderCudaAttrs) {
// Don't allow overloading of destructors. (In theory we could, but it
// would be a giant change to clang.)
if (isa<CXXDestructorDecl>(New))
return false;
CUDAFunctionTarget NewTarget = IdentifyCUDATarget(New),
OldTarget = IdentifyCUDATarget(Old);
if (NewTarget == CFT_InvalidTarget)
return false;
assert((OldTarget != CFT_InvalidTarget) && "Unexpected invalid target.");
// Allow overloading of functions with same signature and different CUDA
// target attributes.
return NewTarget != OldTarget;
}
// The signatures match; this is not an overload.
return false;
}
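// A minimal sketch of the decisions made above:
//
//   void h(int);
//   void h(double);   // overload: parameter types differ
//   int  h(int);      // error, not an overload: only the return type differs
//   struct S {
//     void m();
//     void m() const;   // overload: cv-qualifiers are part of the signature
//   };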
/// Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is ignored.
///
/// \returns true if \arg FD is unavailable and current context is inside
/// an available function, false otherwise.
bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) {
if (!FD->isUnavailable())
return false;
// Walk up the context of the caller.
Decl *C = cast<Decl>(CurContext);
do {
if (C->isUnavailable())
return false;
} while ((C = cast_or_null<Decl>(C->getDeclContext())));
return true;
}
/// Tries a user-defined conversion from From to ToType.
///
/// Produces an implicit conversion sequence for when a standard conversion
/// is not an option. See TryImplicitConversion for more information.
static ImplicitConversionSequence
TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion,
bool AllowObjCConversionOnExplicit) {
ImplicitConversionSequence ICS;
if (SuppressUserConversions) {
// We're not in the case above, so there is no conversion that
// we can perform.
ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
return ICS;
}
// Attempt user-defined conversion.
OverloadCandidateSet Conversions(From->getExprLoc(),
OverloadCandidateSet::CSK_Normal);
switch (IsUserDefinedConversion(S, From, ToType, ICS.UserDefined,
Conversions, AllowExplicit,
AllowObjCConversionOnExplicit)) {
case OR_Success:
case OR_Deleted:
ICS.setUserDefined();
// C++ [over.ics.user]p4:
// A conversion of an expression of class type to the same class
// type is given Exact Match rank, and a conversion of an
// expression of class type to a base class of that type is
// given Conversion rank, in spite of the fact that a copy
// constructor (i.e., a user-defined conversion function) is
// called for those cases.
if (CXXConstructorDecl *Constructor
= dyn_cast<CXXConstructorDecl>(ICS.UserDefined.ConversionFunction)) {
QualType FromCanon
= S.Context.getCanonicalType(From->getType().getUnqualifiedType());
QualType ToCanon
= S.Context.getCanonicalType(ToType).getUnqualifiedType();
if (Constructor->isCopyConstructor() &&
(FromCanon == ToCanon ||
S.IsDerivedFrom(From->getLocStart(), FromCanon, ToCanon))) {
// Turn this into a "standard" conversion sequence, so that it
// gets ranked with standard conversion sequences.
DeclAccessPair Found = ICS.UserDefined.FoundConversionFunction;
ICS.setStandard();
ICS.Standard.setAsIdentityConversion();
ICS.Standard.setFromType(From->getType());
ICS.Standard.setAllToTypes(ToType);
ICS.Standard.CopyConstructor = Constructor;
ICS.Standard.FoundCopyConstructor = Found;
if (ToCanon != FromCanon)
ICS.Standard.Second = ICK_Derived_To_Base;
}
}
break;
case OR_Ambiguous:
ICS.setAmbiguous();
ICS.Ambiguous.setFromType(From->getType());
ICS.Ambiguous.setToType(ToType);
for (OverloadCandidateSet::iterator Cand = Conversions.begin();
Cand != Conversions.end(); ++Cand)
if (Cand->Viable)
ICS.Ambiguous.addConversion(Cand->FoundDecl, Cand->Function);
break;
// Fall through.
case OR_No_Viable_Function:
ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
break;
}
return ICS;
}
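// A minimal sketch of the C++ [over.ics.user]p4 special case handled above:
//
//   struct Base {};
//   struct Derived : Base {};
//   void f(Base);
//   void call(Base b, Derived d) {
//     f(b);   // copy constructor, but ranked as an Exact Match
//     f(d);   // copy constructor, but ranked as a Derived -> Base Conversion
//   }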
/// TryImplicitConversion - Attempt to perform an implicit conversion
/// from the given expression (Expr) to the given type (ToType). This
/// function returns an implicit conversion sequence that can be used
/// to perform the initialization. Given
///
/// void f(float f);
/// void g(int i) { f(i); }
///
/// this routine would produce an implicit conversion sequence to
/// describe the initialization of f from i, which will be a standard
/// conversion sequence containing an lvalue-to-rvalue conversion (C++
/// 4.1) followed by a floating-integral conversion (C++ 4.9).
///
/// Note that this routine only determines how the conversion can be
/// performed; it does not actually perform the conversion. As such,
/// it will not produce any diagnostics if no conversion is available,
/// but will instead return an implicit conversion sequence of kind
/// "BadConversion".
///
/// If @p SuppressUserConversions, then user-defined conversions are
/// not permitted.
/// If @p AllowExplicit, then explicit user-defined conversions are
/// permitted.
///
/// \param AllowObjCWritebackConversion Whether we allow the Objective-C
/// writeback conversion, which allows __autoreleasing id* parameters to
/// be initialized with __strong id* or __weak id* arguments.
static ImplicitConversionSequence
TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion,
bool AllowObjCConversionOnExplicit) {
ImplicitConversionSequence ICS;
if (IsStandardConversion(S, From, ToType, InOverloadResolution,
ICS.Standard, CStyle, AllowObjCWritebackConversion)){
ICS.setStandard();
return ICS;
}
if (!S.getLangOpts().CPlusPlus) {
ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
return ICS;
}
// C++ [over.ics.user]p4:
// A conversion of an expression of class type to the same class
// type is given Exact Match rank, and a conversion of an
// expression of class type to a base class of that type is
// given Conversion rank, in spite of the fact that a copy/move
// constructor (i.e., a user-defined conversion function) is
// called for those cases.
QualType FromType = From->getType();
if (ToType->getAs<RecordType>() && FromType->getAs<RecordType>() &&
(S.Context.hasSameUnqualifiedType(FromType, ToType) ||
S.IsDerivedFrom(From->getLocStart(), FromType, ToType))) {
ICS.setStandard();
ICS.Standard.setAsIdentityConversion();
ICS.Standard.setFromType(FromType);
ICS.Standard.setAllToTypes(ToType);
// We don't actually check at this point whether there is a valid
// copy/move constructor, since overloading just assumes that it
// exists. When we actually perform initialization, we'll find the
// appropriate constructor to copy the returned object, if needed.
ICS.Standard.CopyConstructor = nullptr;
// Determine whether this is considered a derived-to-base conversion.
if (!S.Context.hasSameUnqualifiedType(FromType, ToType))
ICS.Standard.Second = ICK_Derived_To_Base;
return ICS;
}
return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
AllowExplicit, InOverloadResolution, CStyle,
AllowObjCWritebackConversion,
AllowObjCConversionOnExplicit);
}
ImplicitConversionSequence
Sema::TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion) {
return ::TryImplicitConversion(*this, From, ToType,
SuppressUserConversions, AllowExplicit,
InOverloadResolution, CStyle,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
}
/// PerformImplicitConversion - Perform an implicit conversion of the
/// expression From to the type ToType. Returns the
/// converted expression. Flavor is the kind of conversion we're
/// performing, used in the error message. If @p AllowExplicit,
/// explicit user-defined conversions are permitted.
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action, bool AllowExplicit) {
ImplicitConversionSequence ICS;
return PerformImplicitConversion(From, ToType, Action, AllowExplicit, ICS);
}
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action, bool AllowExplicit,
ImplicitConversionSequence& ICS) {
if (checkPlaceholderForOverload(*this, From))
return ExprError();
// Objective-C ARC: Determine whether we will allow the writeback conversion.
bool AllowObjCWritebackConversion
= getLangOpts().ObjCAutoRefCount &&
(Action == AA_Passing || Action == AA_Sending);
if (getLangOpts().ObjC1)
CheckObjCBridgeRelatedConversions(From->getLocStart(),
ToType, From->getType(), From);
ICS = ::TryImplicitConversion(*this, From, ToType,
/*SuppressUserConversions=*/false,
AllowExplicit,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
return PerformImplicitConversion(From, ToType, ICS, Action);
}
/// Determine whether the conversion from FromType to ToType is a valid
/// conversion that strips "noexcept" or "noreturn" off the nested function
/// type.
bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy) {
if (Context.hasSameUnqualifiedType(FromType, ToType))
return false;
// Permit the conversion F(t __attribute__((noreturn))) -> F(t)
// or F(t noexcept) -> F(t)
// where F adds one of the following at most once:
// - a pointer
// - a member pointer
// - a block pointer
// Changes here need matching changes in FindCompositePointerType.
CanQualType CanTo = Context.getCanonicalType(ToType);
CanQualType CanFrom = Context.getCanonicalType(FromType);
Type::TypeClass TyClass = CanTo->getTypeClass();
if (TyClass != CanFrom->getTypeClass()) return false;
if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto) {
if (TyClass == Type::Pointer) {
CanTo = CanTo.getAs<PointerType>()->getPointeeType();
CanFrom = CanFrom.getAs<PointerType>()->getPointeeType();
} else if (TyClass == Type::BlockPointer) {
CanTo = CanTo.getAs<BlockPointerType>()->getPointeeType();
CanFrom = CanFrom.getAs<BlockPointerType>()->getPointeeType();
} else if (TyClass == Type::MemberPointer) {
auto ToMPT = CanTo.getAs<MemberPointerType>();
auto FromMPT = CanFrom.getAs<MemberPointerType>();
// A function pointer conversion cannot change the class of the function.
if (ToMPT->getClass() != FromMPT->getClass())
return false;
CanTo = ToMPT->getPointeeType();
CanFrom = FromMPT->getPointeeType();
} else {
return false;
}
TyClass = CanTo->getTypeClass();
if (TyClass != CanFrom->getTypeClass()) return false;
if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto)
return false;
}
const auto *FromFn = cast<FunctionType>(CanFrom);
FunctionType::ExtInfo FromEInfo = FromFn->getExtInfo();
const auto *ToFn = cast<FunctionType>(CanTo);
FunctionType::ExtInfo ToEInfo = ToFn->getExtInfo();
bool Changed = false;
// Drop 'noreturn' if not present in target type.
if (FromEInfo.getNoReturn() && !ToEInfo.getNoReturn()) {
FromFn = Context.adjustFunctionType(FromFn, FromEInfo.withNoReturn(false));
Changed = true;
}
// Drop 'noexcept' if not present in target type.
if (const auto *FromFPT = dyn_cast<FunctionProtoType>(FromFn)) {
const auto *ToFPT = cast<FunctionProtoType>(ToFn);
if (FromFPT->isNothrow() && !ToFPT->isNothrow()) {
FromFn = cast<FunctionType>(
Context.getFunctionTypeWithExceptionSpec(QualType(FromFPT, 0),
EST_None)
.getTypePtr());
Changed = true;
}
// Convert FromFPT's ExtParameterInfo if necessary. The conversion is valid
// only if the ExtParameterInfo lists of the two function prototypes can be
// merged and the merged list is identical to ToFPT's ExtParameterInfo list.
SmallVector<FunctionProtoType::ExtParameterInfo, 4> NewParamInfos;
bool CanUseToFPT, CanUseFromFPT;
if (Context.mergeExtParameterInfo(ToFPT, FromFPT, CanUseToFPT,
CanUseFromFPT, NewParamInfos) &&
CanUseToFPT && !CanUseFromFPT) {
FunctionProtoType::ExtProtoInfo ExtInfo = FromFPT->getExtProtoInfo();
ExtInfo.ExtParameterInfos =
NewParamInfos.empty() ? nullptr : NewParamInfos.data();
QualType QT = Context.getFunctionType(FromFPT->getReturnType(),
FromFPT->getParamTypes(), ExtInfo);
FromFn = QT->getAs<FunctionType>();
Changed = true;
}
}
if (!Changed)
return false;
assert(QualType(FromFn, 0).isCanonical());
if (QualType(FromFn, 0) != CanTo) return false;
ResultTy = ToType;
return true;
}
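// A minimal sketch of the function conversions recognized above (C++17):
//
//   void f() noexcept;
//   void (*p)() = &f;            // OK: noexcept is dropped from the type
//   void g();
//   void (*q)() noexcept = &g;   // ill-formed: noexcept cannot be added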
/// Determine whether the conversion from FromType to ToType is a valid
/// vector conversion.
///
/// \param ICK Will be set to the vector conversion kind, if this is a vector
/// conversion.
static bool IsVectorConversion(Sema &S, QualType FromType,
QualType ToType, ImplicitConversionKind &ICK) {
// We need at least one of these types to be a vector type to have a vector
// conversion.
if (!ToType->isVectorType() && !FromType->isVectorType())
return false;
// Identical types require no conversions.
if (S.Context.hasSameUnqualifiedType(FromType, ToType))
return false;
// There are no conversions between extended vector types, only identity.
if (ToType->isExtVectorType()) {
// There are no conversions between extended vector types other than the
// identity conversion.
if (FromType->isExtVectorType())
return false;
// Vector splat from any arithmetic type to a vector.
if (FromType->isArithmeticType()) {
ICK = ICK_Vector_Splat;
return true;
}
}
// We can perform the conversion between vector types in the following cases:
// 1)vector types are equivalent AltiVec and GCC vector types
// 2)lax vector conversions are permitted and the vector types are of the
// same size
if (ToType->isVectorType() && FromType->isVectorType()) {
if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
S.isLaxVectorConversion(FromType, ToType)) {
ICK = ICK_Vector_Conversion;
return true;
}
}
return false;
}
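// A minimal sketch of the two vector conversion kinds set above, assuming
// lax vector conversions are enabled (Clang's default):
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   float4 v = 2.0f;   // ICK_Vector_Splat: the scalar is broadcast to all lanes
//   typedef int      int4  __attribute__((vector_size(16)));
//   typedef unsigned uint4 __attribute__((vector_size(16)));
//   void h(int4 a) {
//     uint4 b = a;     // ICK_Vector_Conversion: same-size lax conversion
//   }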
static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle);
/// IsStandardConversion - Determines whether there is a standard
/// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the
/// expression From to the type ToType. Standard conversion sequences
/// only consider non-class types; for conversions that involve class
/// types, use TryImplicitConversion. If a conversion exists, SCS will
/// contain the standard conversion sequence required to perform this
/// conversion and this routine will return true. Otherwise, this
/// routine will return false and the value of SCS is unspecified.
static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle,
bool AllowObjCWritebackConversion) {
QualType FromType = From->getType();
// Standard conversions (C++ [conv])
SCS.setAsIdentityConversion();
SCS.IncompatibleObjC = false;
SCS.setFromType(FromType);
SCS.CopyConstructor = nullptr;
// There are no standard conversions for class types in C++, so
// abort early. When overloading in C, however, we do permit them.
if (S.getLangOpts().CPlusPlus &&
(FromType->isRecordType() || ToType->isRecordType()))
return false;
// The first conversion can be an lvalue-to-rvalue conversion,
// array-to-pointer conversion, or function-to-pointer conversion
// (C++ 4p1).
if (FromType == S.Context.OverloadTy) {
DeclAccessPair AccessPair;
if (FunctionDecl *Fn
= S.ResolveAddressOfOverloadedFunction(From, ToType, false,
AccessPair)) {
// We were able to resolve the address of the overloaded function,
// so we can convert to the type of that function.
FromType = Fn->getType();
SCS.setFromType(FromType);
// we can sometimes resolve &foo<int> regardless of ToType, so check
// if the type matches (identity) or we are converting to bool
if (!S.Context.hasSameUnqualifiedType(
S.ExtractUnqualifiedFunctionType(ToType), FromType)) {
QualType resultTy;
// if the function type matches except for [[noreturn]], it's ok
if (!S.IsFunctionConversion(FromType,
S.ExtractUnqualifiedFunctionType(ToType), resultTy))
// otherwise, only a boolean conversion is standard
if (!ToType->isBooleanType())
return false;
}
// Check if the "from" expression is taking the address of an overloaded
// function and recompute the FromType accordingly. Take advantage of the
// fact that non-static member functions *must* have such an address-of
// expression.
CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn);
if (Method && !Method->isStatic()) {
assert(isa<UnaryOperator>(From->IgnoreParens()) &&
"Non-unary operator on non-static member address");
assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode()
== UO_AddrOf &&
"Non-address-of operator on non-static member address");
const Type *ClassType
= S.Context.getTypeDeclType(Method->getParent()).getTypePtr();
FromType = S.Context.getMemberPointerType(FromType, ClassType);
} else if (isa<UnaryOperator>(From->IgnoreParens())) {
assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode() ==
UO_AddrOf &&
"Non-address-of operator for overloaded function expression");
FromType = S.Context.getPointerType(FromType);
}
// Check that we've computed the proper type after overload resolution.
// FIXME: FixOverloadedFunctionReference has side-effects; we shouldn't
// be calling it from within an NDEBUG block.
assert(S.Context.hasSameType(
FromType,
S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType()));
} else {
return false;
}
}
// Lvalue-to-rvalue conversion (C++11 4.1):
// A glvalue (3.10) of a non-function, non-array type T can
// be converted to a prvalue.
bool argIsLValue = From->isGLValue();
if (argIsLValue &&
!FromType->isFunctionType() && !FromType->isArrayType() &&
S.Context.getCanonicalType(FromType) != S.Context.OverloadTy) {
SCS.First = ICK_Lvalue_To_Rvalue;
// C11 6.3.2.1p2:
// ... if the lvalue has atomic type, the value has the non-atomic version
// of the type of the lvalue ...
if (const AtomicType *Atomic = FromType->getAs<AtomicType>())
FromType = Atomic->getValueType();
// If T is a non-class type, the type of the rvalue is the
// cv-unqualified version of T. Otherwise, the type of the rvalue
// is T (C++ 4.1p1). C++ can't get here with class types; in C, we
// just strip the qualifiers because they don't matter.
FromType = FromType.getUnqualifiedType();
} else if (FromType->isArrayType()) {
// Array-to-pointer conversion (C++ 4.2)
SCS.First = ICK_Array_To_Pointer;
// An lvalue or rvalue of type "array of N T" or "array of unknown
// bound of T" can be converted to an rvalue of type "pointer to
// T" (C++ 4.2p1).
FromType = S.Context.getArrayDecayedType(FromType);
if (S.IsStringLiteralToNonConstPointerConversion(From, ToType)) {
// This conversion is deprecated in C++03 (D.4)
SCS.DeprecatedStringLiteralToCharPtr = true;
// For the purpose of ranking in overload resolution
// (13.3.3.1.1), this conversion is considered an
// array-to-pointer conversion followed by a qualification
// conversion (4.4). (C++ 4.2p2)
SCS.Second = ICK_Identity;
SCS.Third = ICK_Qualification;
SCS.QualificationIncludesObjCLifetime = false;
SCS.setAllToTypes(FromType);
return true;
}
} else if (FromType->isFunctionType() && argIsLValue) {
// Function-to-pointer conversion (C++ 4.3).
SCS.First = ICK_Function_To_Pointer;
if (auto *DRE = dyn_cast<DeclRefExpr>(From->IgnoreParenCasts()))
if (auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl()))
if (!S.checkAddressOfFunctionIsAvailable(FD))
return false;
// An lvalue of function type T can be converted to an rvalue of
// type "pointer to T." The result is a pointer to the
// function. (C++ 4.3p1).
FromType = S.Context.getPointerType(FromType);
} else {
// We don't require any conversions for the first step.
SCS.First = ICK_Identity;
}
SCS.setToType(0, FromType);
// The second conversion can be an integral promotion, floating
// point promotion, integral conversion, floating point conversion,
// floating-integral conversion, pointer conversion,
// pointer-to-member conversion, or boolean conversion (C++ 4p1).
// For overloading in C, this can also be a "compatible-type"
// conversion.
bool IncompatibleObjC = false;
ImplicitConversionKind SecondICK = ICK_Identity;
if (S.Context.hasSameUnqualifiedType(FromType, ToType)) {
// The unqualified versions of the types are the same: there's no
// conversion to do.
SCS.Second = ICK_Identity;
} else if (S.IsIntegralPromotion(From, FromType, ToType)) {
// Integral promotion (C++ 4.5).
SCS.Second = ICK_Integral_Promotion;
FromType = ToType.getUnqualifiedType();
} else if (S.IsFloatingPointPromotion(FromType, ToType)) {
// Floating point promotion (C++ 4.6).
SCS.Second = ICK_Floating_Promotion;
FromType = ToType.getUnqualifiedType();
} else if (S.IsComplexPromotion(FromType, ToType)) {
// Complex promotion (Clang extension)
SCS.Second = ICK_Complex_Promotion;
FromType = ToType.getUnqualifiedType();
} else if (ToType->isBooleanType() &&
(FromType->isArithmeticType() ||
FromType->isAnyPointerType() ||
FromType->isBlockPointerType() ||
FromType->isMemberPointerType() ||
FromType->isNullPtrType())) {
// Boolean conversions (C++ 4.12).
SCS.Second = ICK_Boolean_Conversion;
FromType = S.Context.BoolTy;
} else if (FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isIntegralType(S.Context)) {
// Integral conversions (C++ 4.7).
SCS.Second = ICK_Integral_Conversion;
FromType = ToType.getUnqualifiedType();
} else if (FromType->isAnyComplexType() && ToType->isAnyComplexType()) {
// Complex conversions (C99 6.3.1.6)
SCS.Second = ICK_Complex_Conversion;
FromType = ToType.getUnqualifiedType();
} else if ((FromType->isAnyComplexType() && ToType->isArithmeticType()) ||
(ToType->isAnyComplexType() && FromType->isArithmeticType())) {
// Complex-real conversions (C99 6.3.1.7)
SCS.Second = ICK_Complex_Real;
FromType = ToType.getUnqualifiedType();
} else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) {
// FIXME: Disable conversions between long double and __float128 if
// their representation is different until there is back-end support.
// We of course allow this conversion if long double is really double.
if (&S.Context.getFloatTypeSemantics(FromType) !=
&S.Context.getFloatTypeSemantics(ToType)) {
bool Float128AndLongDouble = ((FromType == S.Context.Float128Ty &&
ToType == S.Context.LongDoubleTy) ||
(FromType == S.Context.LongDoubleTy &&
ToType == S.Context.Float128Ty));
if (Float128AndLongDouble &&
(&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) ==
&llvm::APFloat::PPCDoubleDouble()))
return false;
}
// Floating point conversions (C++ 4.8).
SCS.Second = ICK_Floating_Conversion;
FromType = ToType.getUnqualifiedType();
} else if ((FromType->isRealFloatingType() &&
ToType->isIntegralType(S.Context)) ||
(FromType->isIntegralOrUnscopedEnumerationType() &&
ToType->isRealFloatingType())) {
// Floating-integral conversions (C++ 4.9).
SCS.Second = ICK_Floating_Integral;
FromType = ToType.getUnqualifiedType();
} else if (S.IsBlockPointerConversion(FromType, ToType, FromType)) {
SCS.Second = ICK_Block_Pointer_Conversion;
} else if (AllowObjCWritebackConversion &&
S.isObjCWritebackConversion(FromType, ToType, FromType)) {
SCS.Second = ICK_Writeback_Conversion;
} else if (S.IsPointerConversion(From, FromType, ToType, InOverloadResolution,
FromType, IncompatibleObjC)) {
// Pointer conversions (C++ 4.10).
SCS.Second = ICK_Pointer_Conversion;
SCS.IncompatibleObjC = IncompatibleObjC;
FromType = FromType.getUnqualifiedType();
} else if (S.IsMemberPointerConversion(From, FromType, ToType,
InOverloadResolution, FromType)) {
// Pointer to member conversions (4.11).
SCS.Second = ICK_Pointer_Member;
} else if (IsVectorConversion(S, FromType, ToType, SecondICK)) {
SCS.Second = SecondICK;
FromType = ToType.getUnqualifiedType();
} else if (!S.getLangOpts().CPlusPlus &&
S.Context.typesAreCompatible(ToType, FromType)) {
// Compatible conversions (Clang extension for C function overloading)
SCS.Second = ICK_Compatible_Conversion;
FromType = ToType.getUnqualifiedType();
} else if (IsTransparentUnionStandardConversion(S, From, ToType,
InOverloadResolution,
SCS, CStyle)) {
SCS.Second = ICK_TransparentUnionConversion;
FromType = ToType;
} else if (tryAtomicConversion(S, From, ToType, InOverloadResolution, SCS,
CStyle)) {
// tryAtomicConversion has updated the standard conversion sequence
// appropriately.
return true;
} else if (ToType->isEventT() &&
From->isIntegerConstantExpr(S.getASTContext()) &&
From->EvaluateKnownConstInt(S.getASTContext()) == 0) {
SCS.Second = ICK_Zero_Event_Conversion;
FromType = ToType;
} else if (ToType->isQueueT() &&
From->isIntegerConstantExpr(S.getASTContext()) &&
(From->EvaluateKnownConstInt(S.getASTContext()) == 0)) {
SCS.Second = ICK_Zero_Queue_Conversion;
FromType = ToType;
} else {
// No second conversion required.
SCS.Second = ICK_Identity;
}
SCS.setToType(1, FromType);
// The third conversion can be a function pointer conversion or a
// qualification conversion (C++ [conv.fctptr], [conv.qual]).
bool ObjCLifetimeConversion;
if (S.IsFunctionConversion(FromType, ToType, FromType)) {
// Function pointer conversions (removing 'noexcept') including removal of
// 'noreturn' (Clang extension).
SCS.Third = ICK_Function_Conversion;
} else if (S.IsQualificationConversion(FromType, ToType, CStyle,
ObjCLifetimeConversion)) {
SCS.Third = ICK_Qualification;
SCS.QualificationIncludesObjCLifetime = ObjCLifetimeConversion;
FromType = ToType;
} else {
// No conversion required
SCS.Third = ICK_Identity;
}
// C++ [over.best.ics]p6:
// [...] Any difference in top-level cv-qualification is
// subsumed by the initialization itself and does not constitute
// a conversion. [...]
QualType CanonFrom = S.Context.getCanonicalType(FromType);
QualType CanonTo = S.Context.getCanonicalType(ToType);
if (CanonFrom.getLocalUnqualifiedType()
== CanonTo.getLocalUnqualifiedType() &&
CanonFrom.getLocalQualifiers() != CanonTo.getLocalQualifiers()) {
FromType = ToType;
CanonFrom = CanonTo;
}
SCS.setToType(2, FromType);
if (CanonFrom == CanonTo)
return true;
// If we have not converted the argument type to the parameter type,
// this is a bad conversion sequence, unless we're resolving an overload in C.
if (S.getLangOpts().CPlusPlus || !InOverloadResolution)
return false;
ExprResult ER = ExprResult{From};
Sema::AssignConvertType Conv =
S.CheckSingleAssignmentConstraints(ToType, ER,
/*Diagnose=*/false,
/*DiagnoseCFAudited=*/false,
/*ConvertRHS=*/false);
ImplicitConversionKind SecondConv;
switch (Conv) {
case Sema::Compatible:
SecondConv = ICK_C_Only_Conversion;
break;
// For our purposes, discarding qualifiers is just as bad as using an
// incompatible pointer. Note that an IncompatiblePointer conversion can drop
// qualifiers, as well.
case Sema::CompatiblePointerDiscardsQualifiers:
case Sema::IncompatiblePointer:
case Sema::IncompatiblePointerSign:
SecondConv = ICK_Incompatible_Pointer_Conversion;
break;
default:
return false;
}
// First can only be an lvalue conversion, so we pretend that this was the
// second conversion. First should already be valid from earlier in the
// function.
SCS.Second = SecondConv;
SCS.setToType(1, ToType);
// Third is Identity, because Second should rank us worse than any other
// conversion. This could also be ICK_Qualification, but it's simpler to just
// lump everything in with the second conversion, and we don't gain anything
// from making this ICK_Qualification.
SCS.Third = ICK_Identity;
SCS.setToType(2, ToType);
return true;
}
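// A minimal illustration of the three-step sequence computed above, in
// terms of hypothetical user code (illustrative sketch, not part of Sema):
//
//   void take(const void *);
//   int Arr[4];
//   void call() { take(Arr); }
//     // First:  ICK_Array_To_Pointer   int[4] -> int*
//     // Second: ICK_Pointer_Conversion int*   -> void*
//     // Third:  ICK_Qualification      void*  -> const void*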
static bool
IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle) {
const RecordType *UT = ToType->getAsUnionType();
if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
return false;
// The field to initialize within the transparent union.
RecordDecl *UD = UT->getDecl();
// It's compatible if the expression matches any of the fields.
for (const auto *it : UD->fields()) {
if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
CStyle, /*ObjCWritebackConversion=*/false)) {
ToType = it->getType();
return true;
}
}
return false;
}
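// An illustrative sketch of the transparent-union rule above, using the GCC
// attribute this code looks for; the names 'int_or_float_ptr' and 'g' are
// hypothetical, and the feature is C-only:
//
//   typedef union {
//     int *ip;
//     float *fp;
//   } __attribute__((transparent_union)) int_or_float_ptr;
//
//   void g(int_or_float_ptr);
//   void call(int *p) { g(p); }   // OK: 'p' is compatible with the 'ip' field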
/// IsIntegralPromotion - Determines whether the conversion from the
/// expression From (whose potentially-adjusted type is FromType) to
/// ToType is an integral promotion (C++ 4.5), returning true if so.
bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
const BuiltinType *To = ToType->getAs<BuiltinType>();
// All integers are built-in.
if (!To) {
return false;
}
// An rvalue of type char, signed char, unsigned char, short int, or
// unsigned short int can be converted to an rvalue of type int if
// int can represent all the values of the source type; otherwise,
// the source rvalue can be converted to an rvalue of type unsigned
// int (C++ 4.5p1).
if (FromType->isPromotableIntegerType() && !FromType->isBooleanType() &&
!FromType->isEnumeralType()) {
if (// We can promote any signed, promotable integer type to an int
(FromType->isSignedIntegerType() ||
// We can promote any unsigned integer type whose size is
// less than int to an int.
Context.getTypeSize(FromType) < Context.getTypeSize(ToType))) {
return To->getKind() == BuiltinType::Int;
}
return To->getKind() == BuiltinType::UInt;
}
// C++11 [conv.prom]p3:
// A prvalue of an unscoped enumeration type whose underlying type is not
// fixed (7.2) can be converted to a prvalue of the first of the
// following types that can represent all the values of the enumeration
// (i.e., the values in the range bmin to bmax as described in 7.2): int,
// unsigned int, long int, unsigned long int, long long int, or unsigned
// long long int. If none of the types in that list can represent all the
// values of the enumeration, a prvalue of an unscoped enumeration type
// can be converted to a prvalue of the extended integer type
// with lowest integer conversion rank (4.13) greater than the rank of long
// long in which all the values of the enumeration can be represented. If
// there are two such extended types, the signed one is chosen.
// C++11 [conv.prom]p4:
// A prvalue of an unscoped enumeration type whose underlying type is fixed
// can be converted to a prvalue of its underlying type. Moreover, if
// integral promotion can be applied to its underlying type, a prvalue of an
// unscoped enumeration type whose underlying type is fixed can also be
// converted to a prvalue of the promoted underlying type.
if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) {
// C++0x 7.2p9: Note that this implicit enum to int conversion is not
// provided for a scoped enumeration.
if (FromEnumType->getDecl()->isScoped())
return false;
// We can perform an integral promotion to the underlying type of the enum,
// even if that's not the promoted type. Note that the check for promoting
// the underlying type is based on the type alone, and does not consider
// the bitfield-ness of the actual source expression.
if (FromEnumType->getDecl()->isFixed()) {
QualType Underlying = FromEnumType->getDecl()->getIntegerType();
return Context.hasSameUnqualifiedType(Underlying, ToType) ||
IsIntegralPromotion(nullptr, Underlying, ToType);
}
// We have already pre-calculated the promotion type, so this is trivial.
if (ToType->isIntegerType() &&
isCompleteType(From->getLocStart(), FromType))
return Context.hasSameUnqualifiedType(
ToType, FromEnumType->getDecl()->getPromotionType());
// C++ [conv.prom]p5:
// If the bit-field has an enumerated type, it is treated as any other
// value of that type for promotion purposes.
//
// ... so do not fall through into the bit-field checks below in C++.
if (getLangOpts().CPlusPlus)
return false;
}
// C++0x [conv.prom]p2:
// A prvalue of type char16_t, char32_t, or wchar_t (3.9.1) can be converted
// to a prvalue of the first of the following types that can
// represent all the values of its underlying type: int, unsigned int,
// long int, unsigned long int, long long int, or unsigned long long int.
// If none of the types in that list can represent all the values of its
// underlying type, a prvalue of type char16_t, char32_t, or wchar_t
// can be converted to a prvalue of its underlying
// type.
if (FromType->isAnyCharacterType() && !FromType->isCharType() &&
ToType->isIntegerType()) {
// Determine whether the type we're converting from is signed or
// unsigned.
bool FromIsSigned = FromType->isSignedIntegerType();
uint64_t FromSize = Context.getTypeSize(FromType);
// The types we'll try to promote to, in the appropriate
// order. Try each of these types.
QualType PromoteTypes[6] = {
Context.IntTy, Context.UnsignedIntTy,
Context.LongTy, Context.UnsignedLongTy,
Context.LongLongTy, Context.UnsignedLongLongTy
};
for (int Idx = 0; Idx < 6; ++Idx) {
uint64_t ToSize = Context.getTypeSize(PromoteTypes[Idx]);
if (FromSize < ToSize ||
(FromSize == ToSize &&
FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) {
// We found the type that we can promote to. If this is the
// type we wanted, we have a promotion. Otherwise, no
// promotion.
return Context.hasSameUnqualifiedType(ToType, PromoteTypes[Idx]);
}
}
}
// An rvalue for an integral bit-field (9.6) can be converted to an
// rvalue of type int if int can represent all the values of the
// bit-field; otherwise, it can be converted to unsigned int if
// unsigned int can represent all the values of the bit-field. If
// the bit-field is larger yet, no integral promotion applies to
// it. If the bit-field has an enumerated type, it is treated as any
// other value of that type for promotion purposes (C++ 4.5p3).
// FIXME: We should delay checking of bit-fields until we actually perform the
// conversion.
//
// FIXME: In C, only bit-fields of types _Bool, int, or unsigned int may be
// promoted, per C11 6.3.1.1/2. We promote all bit-fields (including enum
// bit-fields and those whose underlying type is larger than int) for GCC
// compatibility.
if (From) {
if (FieldDecl *MemberDecl = From->getSourceBitField()) {
llvm::APSInt BitWidth;
if (FromType->isIntegralType(Context) &&
MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) {
llvm::APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned());
ToSize = Context.getTypeSize(ToType);
// Are we promoting to an int from a bitfield that fits in an int?
if (BitWidth < ToSize ||
(FromType->isSignedIntegerType() && BitWidth <= ToSize)) {
return To->getKind() == BuiltinType::Int;
}
// Are we promoting to an unsigned int from an unsigned bitfield
// that fits into an unsigned int?
if (FromType->isUnsignedIntegerType() && BitWidth <= ToSize) {
return To->getKind() == BuiltinType::UInt;
}
return false;
}
}
}
// An rvalue of type bool can be converted to an rvalue of type int,
// with false becoming zero and true becoming one (C++ 4.5p4).
if (FromType->isBooleanType() && To->getKind() == BuiltinType::Int) {
return true;
}
return false;
}
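// A minimal illustration of the promotion cases above (hypothetical user
// code, not part of this file):
//
//   char c = 'a';
//   int i = c;             // char -> int (C++ 4.5p1)
//   unsigned short s = 1;
//   int j = s;             // unsigned short -> int: int holds all its values
//   bool b = true;
//   int k = b;             // bool -> int, true becomes 1 (C++ 4.5p4)
//   enum E { e };
//   int m = e;             // unscoped enum -> int (C++11 [conv.prom]p3)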
/// IsFloatingPointPromotion - Determines whether the conversion from
/// FromType to ToType is a floating point promotion (C++ 4.6),
/// returning true if so.
bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
if (const BuiltinType *FromBuiltin = FromType->getAs<BuiltinType>())
if (const BuiltinType *ToBuiltin = ToType->getAs<BuiltinType>()) {
// An rvalue of type float can be converted to an rvalue of type
// double. (C++ 4.6p1).
if (FromBuiltin->getKind() == BuiltinType::Float &&
ToBuiltin->getKind() == BuiltinType::Double)
return true;
// C99 6.3.1.5p1:
// When a float is promoted to double or long double, or a
// double is promoted to long double [...].
if (!getLangOpts().CPlusPlus &&
(FromBuiltin->getKind() == BuiltinType::Float ||
FromBuiltin->getKind() == BuiltinType::Double) &&
(ToBuiltin->getKind() == BuiltinType::LongDouble ||
ToBuiltin->getKind() == BuiltinType::Float128))
return true;
// Half can be promoted to float.
if (!getLangOpts().NativeHalfType &&
FromBuiltin->getKind() == BuiltinType::Half &&
ToBuiltin->getKind() == BuiltinType::Float)
return true;
}
return false;
}
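// A minimal illustration (hypothetical user code):
//
//   float f = 1.0f;
//   double d = f;          // float -> double is a promotion (C++ 4.6p1)
//   long double ld = d;    // double -> long double: a promotion in C only
//                          // (C99 6.3.1.5p1); a floating conversion in C++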
/// Determine if a conversion is a complex promotion.
///
/// A complex promotion is defined as a complex -> complex conversion
/// where the conversion between the underlying real types is a
/// floating-point or integral promotion.
bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) {
const ComplexType *FromComplex = FromType->getAs<ComplexType>();
if (!FromComplex)
return false;
const ComplexType *ToComplex = ToType->getAs<ComplexType>();
if (!ToComplex)
return false;
return IsFloatingPointPromotion(FromComplex->getElementType(),
ToComplex->getElementType()) ||
IsIntegralPromotion(nullptr, FromComplex->getElementType(),
ToComplex->getElementType());
}
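// A minimal illustration; _Complex is a C99 feature that Clang also accepts
// in C++ as an extension (hypothetical user code):
//
//   _Complex float cf;
//   _Complex double cd = cf;   // complex promotion: the element conversion
//                              // float -> double is a floating promotion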
/// BuildSimilarlyQualifiedPointerType - Builds a pointer type that points to
/// ToPointee and carries the same type qualifiers that FromPtr has on its
/// pointee type. ToType, if non-null, is the destination pointer type, which
/// may or may not have the right set of qualifiers on its pointee.
///
static QualType
BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
QualType ToPointee, QualType ToType,
ASTContext &Context,
bool StripObjCLifetime = false) {
assert((FromPtr->getTypeClass() == Type::Pointer ||
FromPtr->getTypeClass() == Type::ObjCObjectPointer) &&
"Invalid similarly-qualified pointer type");
// Conversions to 'id' subsume cv-qualifier conversions.
if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
return ToType.getUnqualifiedType();
QualType CanonFromPointee
= Context.getCanonicalType(FromPtr->getPointeeType());
QualType CanonToPointee = Context.getCanonicalType(ToPointee);
Qualifiers Quals = CanonFromPointee.getQualifiers();
if (StripObjCLifetime)
Quals.removeObjCLifetime();
// Exact qualifier match -> return the pointer type we're converting to.
if (CanonToPointee.getLocalQualifiers() == Quals) {
// ToType is exactly what we need. Return it.
if (!ToType.isNull())
return ToType.getUnqualifiedType();
// Build a pointer to ToPointee. It has the right qualifiers
// already.
if (isa<ObjCObjectPointerType>(ToType))
return Context.getObjCObjectPointerType(ToPointee);
return Context.getPointerType(ToPointee);
}
// Just build a canonical type that has the right qualifiers.
QualType QualifiedCanonToPointee
= Context.getQualifiedType(CanonToPointee.getLocalUnqualifiedType(), Quals);
if (isa<ObjCObjectPointerType>(ToType))
return Context.getObjCObjectPointerType(QualifiedCanonToPointee);
return Context.getPointerType(QualifiedCanonToPointee);
}
static bool isNullPointerConstantForConversion(Expr *Expr,
bool InOverloadResolution,
ASTContext &Context) {
// Handle value-dependent integral null pointer constants correctly.
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#903
if (Expr->isValueDependent() && !Expr->isTypeDependent() &&
Expr->getType()->isIntegerType() && !Expr->getType()->isEnumeralType())
return !InOverloadResolution;
return Expr->isNullPointerConstant(Context,
InOverloadResolution? Expr::NPC_ValueDependentIsNotNull
: Expr::NPC_ValueDependentIsNull);
}
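// A sketch of the value-dependent case handled above (CWG 903; hypothetical
// user code with overloads 'f'):
//
//   void f(int *);
//   void f(int);
//   template <int N> void g() { f(N); }   // During overload resolution, the
//                                         // value-dependent 'N' is not a null
//                                         // pointer constant, so f(int) wins
//                                         // even when N == 0.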
/// IsPointerConversion - Determines whether the conversion of the
/// expression From, which has the (possibly adjusted) type FromType,
/// can be converted to the type ToType via a pointer conversion (C++
/// 4.10). If so, returns true and places the converted type (that
/// might differ from ToType in its cv-qualifiers at some level) into
/// ConvertedType.
///
/// This routine also supports conversions to and from block pointers
/// and conversions with Objective-C's 'id', 'id<protocols...>', and
/// pointers to interfaces. FIXME: Once we've determined the
/// appropriate overloading rules for Objective-C, we may want to
/// split the Objective-C checks into a different routine; however,
/// GCC seems to consider all of these conversions to be pointer
/// conversions, so for now they live here. IncompatibleObjC will be
/// set if the conversion is an allowed Objective-C conversion that
/// should result in a warning.
bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType,
bool &IncompatibleObjC) {
IncompatibleObjC = false;
if (isObjCPointerConversion(FromType, ToType, ConvertedType,
IncompatibleObjC))
return true;
// Conversion from a null pointer constant to any Objective-C pointer type.
if (ToType->isObjCObjectPointerType() &&
isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
ConvertedType = ToType;
return true;
}
// Blocks: Block pointers can be converted to void*.
if (FromType->isBlockPointerType() && ToType->isPointerType() &&
ToType->getAs<PointerType>()->getPointeeType()->isVoidType()) {
ConvertedType = ToType;
return true;
}
// Blocks: A null pointer constant can be converted to a block
// pointer type.
if (ToType->isBlockPointerType() &&
isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
ConvertedType = ToType;
return true;
}
// If the left-hand-side is nullptr_t, the right side can be a null
// pointer constant.
if (ToType->isNullPtrType() &&
isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
ConvertedType = ToType;
return true;
}
const PointerType* ToTypePtr = ToType->getAs<PointerType>();
if (!ToTypePtr)
return false;
// A null pointer constant can be converted to a pointer type (C++ 4.10p1).
if (isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
ConvertedType = ToType;
return true;
}
// Beyond this point, both types need to be pointers, including
// Objective-C pointers.
QualType ToPointeeType = ToTypePtr->getPointeeType();
if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() &&
!getLangOpts().ObjCAutoRefCount) {
ConvertedType = BuildSimilarlyQualifiedPointerType(
FromType->getAs<ObjCObjectPointerType>(),
ToPointeeType,
ToType, Context);
return true;
}
const PointerType *FromTypePtr = FromType->getAs<PointerType>();
if (!FromTypePtr)
return false;
QualType FromPointeeType = FromTypePtr->getPointeeType();
// If the unqualified pointee types are the same, this can't be a
// pointer conversion, so don't do all of the work below.
if (Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType))
return false;
// An rvalue of type "pointer to cv T," where T is an object type,
// can be converted to an rvalue of type "pointer to cv void" (C++
// 4.10p2).
if (FromPointeeType->isIncompleteOrObjectType() &&
ToPointeeType->isVoidType()) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
ToType, Context,
/*StripObjCLifetime=*/true);
return true;
}
// MSVC allows implicit function to void* type conversion.
if (getLangOpts().MSVCCompat && FromPointeeType->isFunctionType() &&
ToPointeeType->isVoidType()) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
ToType, Context);
return true;
}
// When we're overloading in C, we allow a special kind of pointer
// conversion for compatible-but-not-identical pointee types.
if (!getLangOpts().CPlusPlus &&
Context.typesAreCompatible(FromPointeeType, ToPointeeType)) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
ToType, Context);
return true;
}
// C++ [conv.ptr]p3:
//
// An rvalue of type "pointer to cv D," where D is a class type,
// can be converted to an rvalue of type "pointer to cv B," where
// B is a base class (clause 10) of D. If B is an inaccessible
// (clause 11) or ambiguous (10.2) base class of D, a program that
// necessitates this conversion is ill-formed. The result of the
// conversion is a pointer to the base class sub-object of the
// derived class object. The null pointer value is converted to
// the null pointer value of the destination type.
//
// Note that we do not check for ambiguity or inaccessibility
// here. That is handled by CheckPointerConversion.
if (getLangOpts().CPlusPlus &&
FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
!Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) &&
IsDerivedFrom(From->getLocStart(), FromPointeeType, ToPointeeType)) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
ToType, Context);
return true;
}
if (FromPointeeType->isVectorType() && ToPointeeType->isVectorType() &&
Context.areCompatibleVectorTypes(FromPointeeType, ToPointeeType)) {
ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
ToPointeeType,
ToType, Context);
return true;
}
return false;
}
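// A minimal illustration of the two most common cases above (hypothetical
// user code):
//
//   struct B {};  struct D : B {};
//   void f(D *pd, int *pi) {
//     void *pv = pi;   // pointer to object -> pointer to cv void (C++ 4.10p2)
//     B *pb = pd;      // pointer to derived -> pointer to base (C++ 4.10p3)
//   }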
/// Adopt the given qualifiers for the given type.
static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs) {
Qualifiers TQs = T.getQualifiers();
// Check whether qualifiers already match.
if (TQs == Qs)
return T;
if (Qs.compatiblyIncludes(TQs))
return Context.getQualifiedType(T, Qs);
return Context.getQualifiedType(T.getUnqualifiedType(), Qs);
}
/// isObjCPointerConversion - Determines whether this is an
/// Objective-C pointer conversion. Subroutine of IsPointerConversion,
/// with the same arguments and return values.
bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType,
bool &IncompatibleObjC) {
if (!getLangOpts().ObjC1)
return false;
// The set of qualifiers on the type we're converting from.
Qualifiers FromQualifiers = FromType.getQualifiers();
// First, we handle all conversions on ObjC object pointer types.
const ObjCObjectPointerType* ToObjCPtr =
ToType->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType *FromObjCPtr =
FromType->getAs<ObjCObjectPointerType>();
if (ToObjCPtr && FromObjCPtr) {
// If the pointee types are the same (ignoring qualifications),
// then this is not a pointer conversion.
if (Context.hasSameUnqualifiedType(ToObjCPtr->getPointeeType(),
FromObjCPtr->getPointeeType()))
return false;
// Conversion between Objective-C pointers.
if (Context.canAssignObjCInterfaces(ToObjCPtr, FromObjCPtr)) {
const ObjCInterfaceType* LHS = ToObjCPtr->getInterfaceType();
const ObjCInterfaceType* RHS = FromObjCPtr->getInterfaceType();
if (getLangOpts().CPlusPlus && LHS && RHS &&
!ToObjCPtr->getPointeeType().isAtLeastAsQualifiedAs(
FromObjCPtr->getPointeeType()))
return false;
ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
ToObjCPtr->getPointeeType(),
ToType, Context);
ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
return true;
}
if (Context.canAssignObjCInterfaces(FromObjCPtr, ToObjCPtr)) {
// Okay: this is some kind of implicit downcast of Objective-C
// interfaces, which is permitted. However, we're going to
// complain about it.
IncompatibleObjC = true;
ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
ToObjCPtr->getPointeeType(),
ToType, Context);
ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
return true;
}
}
// Beyond this point, both types need to be C pointers or block pointers.
QualType ToPointeeType;
if (const PointerType *ToCPtr = ToType->getAs<PointerType>())
ToPointeeType = ToCPtr->getPointeeType();
else if (const BlockPointerType *ToBlockPtr =
ToType->getAs<BlockPointerType>()) {
// Objective-C++: We're able to convert from a pointer to any object
// to a block pointer type.
if (FromObjCPtr && FromObjCPtr->isObjCBuiltinType()) {
ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
return true;
}
ToPointeeType = ToBlockPtr->getPointeeType();
}
else if (FromType->getAs<BlockPointerType>() &&
ToObjCPtr && ToObjCPtr->isObjCBuiltinType()) {
// Objective-C++: We're able to convert from a block pointer type to a
// pointer to any object.
ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
return true;
}
else
return false;
QualType FromPointeeType;
if (const PointerType *FromCPtr = FromType->getAs<PointerType>())
FromPointeeType = FromCPtr->getPointeeType();
else if (const BlockPointerType *FromBlockPtr =
FromType->getAs<BlockPointerType>())
FromPointeeType = FromBlockPtr->getPointeeType();
else
return false;
// If we have pointers to pointers, recursively check whether this
// is an Objective-C conversion.
if (FromPointeeType->isPointerType() && ToPointeeType->isPointerType() &&
isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
IncompatibleObjC)) {
// We always complain about this conversion.
IncompatibleObjC = true;
ConvertedType = Context.getPointerType(ConvertedType);
ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
return true;
}
// Allow conversion of a pointee that is an Objective-C pointer to another
// one, as in I* to id.
if (FromPointeeType->getAs<ObjCObjectPointerType>() &&
ToPointeeType->getAs<ObjCObjectPointerType>() &&
isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
IncompatibleObjC)) {
ConvertedType = Context.getPointerType(ConvertedType);
ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
return true;
}
// If we have pointers to functions or blocks, check whether the only
// differences in the argument and result types are in Objective-C
// pointer conversions. If so, we permit the conversion (but
// complain about it).
const FunctionProtoType *FromFunctionType
= FromPointeeType->getAs<FunctionProtoType>();
const FunctionProtoType *ToFunctionType
= ToPointeeType->getAs<FunctionProtoType>();
if (FromFunctionType && ToFunctionType) {
// If the function types are exactly the same, this isn't an
// Objective-C pointer conversion.
if (Context.getCanonicalType(FromPointeeType)
== Context.getCanonicalType(ToPointeeType))
return false;
// Perform the quick checks that will tell us whether these
// function types are obviously different.
if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
FromFunctionType->isVariadic() != ToFunctionType->isVariadic() ||
FromFunctionType->getTypeQuals() != ToFunctionType->getTypeQuals())
return false;
bool HasObjCConversion = false;
if (Context.getCanonicalType(FromFunctionType->getReturnType()) ==
Context.getCanonicalType(ToFunctionType->getReturnType())) {
// Okay, the types match exactly. Nothing to do.
} else if (isObjCPointerConversion(FromFunctionType->getReturnType(),
ToFunctionType->getReturnType(),
ConvertedType, IncompatibleObjC)) {
// Okay, we have an Objective-C pointer conversion.
HasObjCConversion = true;
} else {
// Function types are too different. Abort.
return false;
}
// Check argument types.
for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
ArgIdx != NumArgs; ++ArgIdx) {
QualType FromArgType = FromFunctionType->getParamType(ArgIdx);
QualType ToArgType = ToFunctionType->getParamType(ArgIdx);
if (Context.getCanonicalType(FromArgType)
== Context.getCanonicalType(ToArgType)) {
// Okay, the types match exactly. Nothing to do.
} else if (isObjCPointerConversion(FromArgType, ToArgType,
ConvertedType, IncompatibleObjC)) {
// Okay, we have an Objective-C pointer conversion.
HasObjCConversion = true;
} else {
// Argument types are too different. Abort.
return false;
}
}
if (HasObjCConversion) {
// We had an Objective-C conversion. Allow this pointer
// conversion, but complain about it.
ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
IncompatibleObjC = true;
return true;
}
}
return false;
}
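// A minimal illustration (hypothetical Objective-C user code, since these
// paths only fire under -ObjC):
//
//   NSString *s;
//   id obj = s;            // OK: conversions to 'id' subsume the rest
//   NSObject *base;
//   NSString *str = base;  // implicit downcast between interfaces: permitted
//                          // here, but IncompatibleObjC is set so the caller
//                          // issues a warning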
/// Determine whether this is an Objective-C writeback conversion,
/// used for parameter passing when performing automatic reference counting.
///
/// \param FromType The type we're converting from.
///
/// \param ToType The type we're converting to.
///
/// \param ConvertedType The type that will be produced after applying
/// this conversion.
bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType) {
if (!getLangOpts().ObjCAutoRefCount ||
Context.hasSameUnqualifiedType(FromType, ToType))
return false;
// Parameter must be a pointer to __autoreleasing (with no other qualifiers).
QualType ToPointee;
if (const PointerType *ToPointer = ToType->getAs<PointerType>())
ToPointee = ToPointer->getPointeeType();
else
return false;
Qualifiers ToQuals = ToPointee.getQualifiers();
if (!ToPointee->isObjCLifetimeType() ||
ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
!ToQuals.withoutObjCLifetime().empty())
return false;
// Argument must be a pointer to __strong or __weak.
QualType FromPointee;
if (const PointerType *FromPointer = FromType->getAs<PointerType>())
FromPointee = FromPointer->getPointeeType();
else
return false;
Qualifiers FromQuals = FromPointee.getQualifiers();
if (!FromPointee->isObjCLifetimeType() ||
(FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
return false;
// Make sure that we have compatible qualifiers.
FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
if (!ToQuals.compatiblyIncludes(FromQuals))
return false;
// Remove qualifiers from the pointee type we're converting from; they
// aren't used in the compatibility check below, and we'll be adding back
// qualifiers (with __autoreleasing) if the compatibility check succeeds.
FromPointee = FromPointee.getUnqualifiedType();
// The unqualified form of the pointee types must be compatible.
ToPointee = ToPointee.getUnqualifiedType();
bool IncompatibleObjC;
if (Context.typesAreCompatible(FromPointee, ToPointee))
FromPointee = ToPointee;
else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
IncompatibleObjC))
return false;
// Construct the type we're converting to, which is a pointer to
// __autoreleasing pointee.
FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
ConvertedType = Context.getPointerType(FromPointee);
return true;
}
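// A sketch of the ARC writeback pattern this recognizes (hypothetical
// Objective-C user code, compiled with -fobjc-arc):
//
//   void getError(NSError * __autoreleasing *outError);
//   void call() {
//     NSError *err;        // __strong by default under ARC
//     getError(&err);      // NSError * __strong * converts to
//                          // NSError * __autoreleasing * via writeback
//   }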
bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType) {
QualType ToPointeeType;
if (const BlockPointerType *ToBlockPtr =
ToType->getAs<BlockPointerType>())
ToPointeeType = ToBlockPtr->getPointeeType();
else
return false;
QualType FromPointeeType;
if (const BlockPointerType *FromBlockPtr =
FromType->getAs<BlockPointerType>())
FromPointeeType = FromBlockPtr->getPointeeType();
else
return false;
// We have pointers to blocks; check whether the only differences in the
// argument and result types are in Objective-C pointer conversions. If so,
// we permit the conversion.
const FunctionProtoType *FromFunctionType
= FromPointeeType->getAs<FunctionProtoType>();
const FunctionProtoType *ToFunctionType
= ToPointeeType->getAs<FunctionProtoType>();
if (!FromFunctionType || !ToFunctionType)
return false;
if (Context.hasSameType(FromPointeeType, ToPointeeType))
return true;
// Perform the quick checks that will tell us whether these
// function types are obviously different.
if (FromFunctionType->getNumParams() != ToFunctionType->getNumParams() ||
FromFunctionType->isVariadic() != ToFunctionType->isVariadic())
return false;
FunctionType::ExtInfo FromEInfo = FromFunctionType->getExtInfo();
FunctionType::ExtInfo ToEInfo = ToFunctionType->getExtInfo();
if (FromEInfo != ToEInfo)
return false;
bool IncompatibleObjC = false;
if (Context.hasSameType(FromFunctionType->getReturnType(),
ToFunctionType->getReturnType())) {
// Okay, the types match exactly. Nothing to do.
} else {
QualType RHS = FromFunctionType->getReturnType();
QualType LHS = ToFunctionType->getReturnType();
if ((!getLangOpts().CPlusPlus || !RHS->isRecordType()) &&
!RHS.hasQualifiers() && LHS.hasQualifiers())
LHS = LHS.getUnqualifiedType();
if (Context.hasSameType(RHS,LHS)) {
// OK exact match.
} else if (isObjCPointerConversion(RHS, LHS,
ConvertedType, IncompatibleObjC)) {
if (IncompatibleObjC)
return false;
// Okay, we have an Objective-C pointer conversion.
}
else
return false;
}
// Check argument types.
for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumParams();
ArgIdx != NumArgs; ++ArgIdx) {
IncompatibleObjC = false;
QualType FromArgType = FromFunctionType->getParamType(ArgIdx);
QualType ToArgType = ToFunctionType->getParamType(ArgIdx);
if (Context.hasSameType(FromArgType, ToArgType)) {
// Okay, the types match exactly. Nothing to do.
} else if (isObjCPointerConversion(ToArgType, FromArgType,
ConvertedType, IncompatibleObjC)) {
if (IncompatibleObjC)
return false;
// Okay, we have an Objective-C pointer conversion.
} else
// Argument types are too different. Abort.
return false;
}
SmallVector<FunctionProtoType::ExtParameterInfo, 4> NewParamInfos;
bool CanUseToFPT, CanUseFromFPT;
if (!Context.mergeExtParameterInfo(ToFunctionType, FromFunctionType,
CanUseToFPT, CanUseFromFPT,
NewParamInfos))
return false;
ConvertedType = ToType;
return true;
}
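// A minimal illustration (hypothetical Objective-C user code, with -fblocks):
//
//   NSString *(^makeString)(void);
//   id (^makeObject)(void) = makeString;  // OK: the block types differ only
//                                         // by an ObjC pointer conversion of
//                                         // the return type (NSString* -> id)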
enum {
ft_default,
ft_different_class,
ft_parameter_arity,
ft_parameter_mismatch,
ft_return_type,
ft_qualifer_mismatch,
ft_noexcept
};
/// Attempts to get the FunctionProtoType from a Type. Handles
/// MemberFunctionPointers properly.
static const FunctionProtoType *tryGetFunctionProtoType(QualType FromType) {
if (auto *FPT = FromType->getAs<FunctionProtoType>())
return FPT;
if (auto *MPT = FromType->getAs<MemberPointerType>())
return MPT->getPointeeType()->getAs<FunctionProtoType>();
return nullptr;
}
/// HandleFunctionTypeMismatch - Gives diagnostic information for differing
/// function types. Catches a different number of parameters, mismatches in
/// parameter types, and different return types.
void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType) {
// If either type is not valid, include no extra info.
if (FromType.isNull() || ToType.isNull()) {
PDiag << ft_default;
return;
}
// Get the function type from the pointers.
if (FromType->isMemberPointerType() && ToType->isMemberPointerType()) {
const MemberPointerType *FromMember = FromType->getAs<MemberPointerType>(),
*ToMember = ToType->getAs<MemberPointerType>();
if (!Context.hasSameType(FromMember->getClass(), ToMember->getClass())) {
PDiag << ft_different_class << QualType(ToMember->getClass(), 0)
<< QualType(FromMember->getClass(), 0);
return;
}
FromType = FromMember->getPointeeType();
ToType = ToMember->getPointeeType();
}
if (FromType->isPointerType())
FromType = FromType->getPointeeType();
if (ToType->isPointerType())
ToType = ToType->getPointeeType();
// Remove references.
FromType = FromType.getNonReferenceType();
ToType = ToType.getNonReferenceType();
// Don't print extra info for non-specialized template functions.
if (FromType->isInstantiationDependentType() &&
!FromType->getAs<TemplateSpecializationType>()) {
PDiag << ft_default;
return;
}
// No extra info for same types.
if (Context.hasSameType(FromType, ToType)) {
PDiag << ft_default;
return;
}
const FunctionProtoType *FromFunction = tryGetFunctionProtoType(FromType),
*ToFunction = tryGetFunctionProtoType(ToType);
// Both types need to be function types.
if (!FromFunction || !ToFunction) {
PDiag << ft_default;
return;
}
if (FromFunction->getNumParams() != ToFunction->getNumParams()) {
PDiag << ft_parameter_arity << ToFunction->getNumParams()
<< FromFunction->getNumParams();
return;
}
// Handle different parameter types.
unsigned ArgPos;
if (!FunctionParamTypesAreEqual(FromFunction, ToFunction, &ArgPos)) {
PDiag << ft_parameter_mismatch << ArgPos + 1
<< ToFunction->getParamType(ArgPos)
<< FromFunction->getParamType(ArgPos);
return;
}
// Handle different return type.
if (!Context.hasSameType(FromFunction->getReturnType(),
ToFunction->getReturnType())) {
PDiag << ft_return_type << ToFunction->getReturnType()
<< FromFunction->getReturnType();
return;
}
unsigned FromQuals = FromFunction->getTypeQuals(),
ToQuals = ToFunction->getTypeQuals();
if (FromQuals != ToQuals) {
PDiag << ft_qualifer_mismatch << ToQuals << FromQuals;
return;
}
// Handle exception specification differences on canonical type (in C++17
// onwards).
if (cast<FunctionProtoType>(FromFunction->getCanonicalTypeUnqualified())
->isNothrow() !=
cast<FunctionProtoType>(ToFunction->getCanonicalTypeUnqualified())
->isNothrow()) {
PDiag << ft_noexcept;
return;
}
// Unable to find a difference, so add no extra info.
PDiag << ft_default;
}
/// FunctionParamTypesAreEqual - This routine checks two function proto types
/// for equality of their argument types. The caller has already checked that
/// they have the same number of arguments. If the parameters are different,
/// ArgPos will have the parameter index of the first different parameter.
bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos) {
for (FunctionProtoType::param_type_iterator O = OldType->param_type_begin(),
N = NewType->param_type_begin(),
E = OldType->param_type_end();
O && (O != E); ++O, ++N) {
if (!Context.hasSameType(O->getUnqualifiedType(),
N->getUnqualifiedType())) {
if (ArgPos)
*ArgPos = O - OldType->param_type_begin();
return false;
}
}
return true;
}
/// CheckPointerConversion - Check the pointer conversion from the
/// expression From to the type ToType. This routine checks for
/// ambiguous or inaccessible derived-to-base pointer
/// conversions for which IsPointerConversion has already returned
/// true. It returns true and produces a diagnostic if there was an
/// error, or returns false otherwise.
bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose) {
QualType FromType = From->getType();
bool IsCStyleOrFunctionalCast = IgnoreBaseAccess;
Kind = CK_BitCast;
if (Diagnose && !IsCStyleOrFunctionalCast && !FromType->isAnyPointerType() &&
From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull) ==
Expr::NPCK_ZeroExpression) {
if (Context.hasSameUnqualifiedType(From->getType(), Context.BoolTy))
DiagRuntimeBehavior(From->getExprLoc(), From,
PDiag(diag::warn_impcast_bool_to_null_pointer)
<< ToType << From->getSourceRange());
else if (!isUnevaluatedContext())
Diag(From->getExprLoc(), diag::warn_non_literal_null_pointer)
<< ToType << From->getSourceRange();
}
if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) {
if (const PointerType *FromPtrType = FromType->getAs<PointerType>()) {
QualType FromPointeeType = FromPtrType->getPointeeType(),
ToPointeeType = ToPtrType->getPointeeType();
if (FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
!Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType)) {
// We must have a derived-to-base conversion. Check for an
// ambiguous or inaccessible conversion.
unsigned InaccessibleID = 0;
unsigned AmbigiousID = 0;
if (Diagnose) {
InaccessibleID = diag::err_upcast_to_inaccessible_base;
AmbigiousID = diag::err_ambiguous_derived_to_base_conv;
}
if (CheckDerivedToBaseConversion(
FromPointeeType, ToPointeeType, InaccessibleID, AmbigiousID,
From->getExprLoc(), From->getSourceRange(), DeclarationName(),
&BasePath, IgnoreBaseAccess))
return true;
// The conversion was successful.
Kind = CK_DerivedToBase;
}
if (Diagnose && !IsCStyleOrFunctionalCast &&
FromPointeeType->isFunctionType() && ToPointeeType->isVoidType()) {
assert(getLangOpts().MSVCCompat &&
"this should only be possible with MSVCCompat!");
Diag(From->getExprLoc(), diag::ext_ms_impcast_fn_obj)
<< From->getSourceRange();
}
}
} else if (const ObjCObjectPointerType *ToPtrType =
ToType->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *FromPtrType =
FromType->getAs<ObjCObjectPointerType>()) {
// Objective-C++ conversions are always okay.
// FIXME: We should have a different class of conversions for the
// Objective-C++ implicit conversions.
if (FromPtrType->isObjCBuiltinType() || ToPtrType->isObjCBuiltinType())
return false;
} else if (FromType->isBlockPointerType()) {
Kind = CK_BlockPointerToObjCPointerCast;
} else {
Kind = CK_CPointerToObjCPointerCast;
}
} else if (ToType->isBlockPointerType()) {
if (!FromType->isBlockPointerType())
Kind = CK_AnyPointerToBlockPointerCast;
}
// We shouldn't fall into this case unless it's valid for other
// reasons.
if (From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
Kind = CK_NullToPointer;
return false;
}
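// A minimal illustration of the diagnostics above (hypothetical user code):
//
//   void *p = false;    // zero boolean expression used as a null pointer:
//                       // diagnosed via warn_impcast_bool_to_null_pointer
//   void *q = 1 - 1;    // non-literal expression evaluating to zero:
//                       // diagnosed via warn_non_literal_null_pointer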
/// IsMemberPointerConversion - Determines whether the conversion of the
/// expression From, which has the (possibly adjusted) type FromType, can be
/// converted to the type ToType via a member pointer conversion (C++ 4.11).
/// If so, returns true and places the converted type (that might differ from
/// ToType in its cv-qualifiers at some level) into ConvertedType.
bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType) {
const MemberPointerType *ToTypePtr = ToType->getAs<MemberPointerType>();
if (!ToTypePtr)
return false;
// A null pointer constant can be converted to a member pointer (C++ 4.11p1)
if (From->isNullPointerConstant(Context,
InOverloadResolution? Expr::NPC_ValueDependentIsNotNull
: Expr::NPC_ValueDependentIsNull)) {
ConvertedType = ToType;
return true;
}
// Otherwise, both types have to be member pointers.
const MemberPointerType *FromTypePtr = FromType->getAs<MemberPointerType>();
if (!FromTypePtr)
return false;
// A pointer to member of B can be converted to a pointer to member of D,
// where D is derived from B (C++ 4.11p2).
QualType FromClass(FromTypePtr->getClass(), 0);
QualType ToClass(ToTypePtr->getClass(), 0);
if (!Context.hasSameUnqualifiedType(FromClass, ToClass) &&
IsDerivedFrom(From->getLocStart(), ToClass, FromClass)) {
ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
ToClass.getTypePtr());
return true;
}
return false;
}
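// A minimal illustration (hypothetical user code):
//
//   struct B { int m; };
//   struct D : B {};
//   int B::*pb = &B::m;
//   int D::*pd = pb;     // pointer to member of base -> pointer to member
//                        // of derived (C++ 4.11p2)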
/// CheckMemberPointerConversion - Check the member pointer conversion from the
/// expression From to the type ToType. This routine checks for ambiguous or
/// virtual or inaccessible base-to-derived member pointer conversions
/// for which IsMemberPointerConversion has already returned true. It returns
/// true and produces a diagnostic if there was an error, or returns false
/// otherwise.
bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess) {
QualType FromType = From->getType();
const MemberPointerType *FromPtrType = FromType->getAs<MemberPointerType>();
if (!FromPtrType) {
// This must be a null-pointer-to-member-pointer conversion.
assert(From->isNullPointerConstant(Context,
Expr::NPC_ValueDependentIsNull) &&
"Expr must be null pointer constant!");
Kind = CK_NullToMemberPointer;
return false;
}
const MemberPointerType *ToPtrType = ToType->getAs<MemberPointerType>();
assert(ToPtrType && "No member pointer cast has a target type "
"that is not a member pointer.");
QualType FromClass = QualType(FromPtrType->getClass(), 0);
QualType ToClass = QualType(ToPtrType->getClass(), 0);
// FIXME: What about dependent types?
assert(FromClass->isRecordType() && "Pointer into non-class.");
assert(ToClass->isRecordType() && "Pointer into non-class.");
CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
/*DetectVirtual=*/true);
bool DerivationOkay =
IsDerivedFrom(From->getLocStart(), ToClass, FromClass, Paths);
assert(DerivationOkay &&
"Should not have been called if derivation isn't OK.");
(void)DerivationOkay;
if (Paths.isAmbiguous(Context.getCanonicalType(FromClass).
getUnqualifiedType())) {
std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
Diag(From->getExprLoc(), diag::err_ambiguous_memptr_conv)
<< 0 << FromClass << ToClass << PathDisplayStr << From->getSourceRange();
return true;
}
if (const RecordType *VBase = Paths.getDetectedVirtual()) {
Diag(From->getExprLoc(), diag::err_memptr_conv_via_virtual)
<< FromClass << ToClass << QualType(VBase, 0)
<< From->getSourceRange();
return true;
}
if (!IgnoreBaseAccess)
CheckBaseClassAccess(From->getExprLoc(), FromClass, ToClass,
Paths.front(),
diag::err_downcast_from_inaccessible_base);
// Must be a base-to-derived member pointer conversion.
BuildBasePathArray(Paths, BasePath);
Kind = CK_BaseToDerivedMemberPointer;
return false;
}
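// A sketch of the virtual-base rejection above (hypothetical user code):
//
//   struct B { int m; };
//   struct D : virtual B {};
//   int B::*pb = &B::m;
//   int D::*pd = pb;     // error: the conversion routes through a virtual
//                        // base (err_memptr_conv_via_virtual)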
/// Determine whether the lifetime conversion between the two given
/// qualifiers sets is nontrivial.
static bool isNonTrivialObjCLifetimeConversion(Qualifiers FromQuals,
Qualifiers ToQuals) {
// Converting anything to const __unsafe_unretained is trivial.
if (ToQuals.hasConst() &&
ToQuals.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
return false;
return true;
}
/// IsQualificationConversion - Determines whether the conversion from
/// an rvalue of type FromType to ToType is a qualification conversion
/// (C++ 4.4).
///
/// \param ObjCLifetimeConversion Output parameter that will be set to indicate
/// when the qualification conversion involves a change in the Objective-C
/// object lifetime.
bool
Sema::IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion) {
FromType = Context.getCanonicalType(FromType);
ToType = Context.getCanonicalType(ToType);
ObjCLifetimeConversion = false;
// If FromType and ToType are the same type, this is not a
// qualification conversion.
if (FromType.getUnqualifiedType() == ToType.getUnqualifiedType())
return false;
// (C++ 4.4p4):
// A conversion can add cv-qualifiers at levels other than the first
// in multi-level pointers, subject to the following rules: [...]
bool PreviousToQualsIncludeConst = true;
bool UnwrappedAnyPointer = false;
while (Context.UnwrapSimilarTypes(FromType, ToType)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
// conversion. Then, if all is well, we unwrap one more level of
// pointers or pointers-to-members and do it all again
// until there are no more pointers or pointers-to-members left to
// unwrap.
UnwrappedAnyPointer = true;
Qualifiers FromQuals = FromType.getQualifiers();
Qualifiers ToQuals = ToType.getQualifiers();
// Ignore __unaligned qualifier if this type is void.
if (ToType.getUnqualifiedType()->isVoidType())
FromQuals.removeUnaligned();
// Objective-C ARC:
// Check Objective-C lifetime conversions.
if (FromQuals.getObjCLifetime() != ToQuals.getObjCLifetime() &&
UnwrappedAnyPointer) {
if (ToQuals.compatiblyIncludesObjCLifetime(FromQuals)) {
if (isNonTrivialObjCLifetimeConversion(FromQuals, ToQuals))
ObjCLifetimeConversion = true;
FromQuals.removeObjCLifetime();
ToQuals.removeObjCLifetime();
} else {
// Qualification conversions cannot cast between different
// Objective-C lifetime qualifiers.
return false;
}
}
// Allow addition/removal of GC attributes but not changing GC attributes.
if (FromQuals.getObjCGCAttr() != ToQuals.getObjCGCAttr() &&
(!FromQuals.hasObjCGCAttr() || !ToQuals.hasObjCGCAttr())) {
FromQuals.removeObjCGCAttr();
ToQuals.removeObjCGCAttr();
}
// -- for every j > 0, if const is in cv 1,j then const is in cv
// 2,j, and similarly for volatile.
if (!CStyle && !ToQuals.compatiblyIncludes(FromQuals))
return false;
// -- if the cv 1,j and cv 2,j are different, then const is in
// every cv for 0 < k < j.
if (!CStyle && FromQuals.getCVRQualifiers() != ToQuals.getCVRQualifiers()
&& !PreviousToQualsIncludeConst)
return false;
// Keep track of whether all prior cv-qualifiers in the "to" type
// include const.
PreviousToQualsIncludeConst
= PreviousToQualsIncludeConst && ToQuals.hasConst();
}
// Allows address space promotion by language rules implemented in
// Type::Qualifiers::isAddressSpaceSupersetOf.
Qualifiers FromQuals = FromType.getQualifiers();
Qualifiers ToQuals = ToType.getQualifiers();
if (!ToQuals.isAddressSpaceSupersetOf(FromQuals) &&
!FromQuals.isAddressSpaceSupersetOf(ToQuals)) {
return false;
}
// We are left with FromType and ToType being the pointee types
// after unwrapping the original FromType and ToType the same number
// of times. If we unwrapped any pointers, and if FromType and
// ToType have the same unqualified type (since we checked
// qualifiers above), then this is a qualification conversion.
return UnwrappedAnyPointer && Context.hasSameUnqualifiedType(FromType,ToType);
}
/// Determine whether this is a conversion from a scalar type to an
/// atomic type.
///
/// If successful, updates \c SCS's second and third steps in the conversion
/// sequence to finish the conversion.
static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
bool InOverloadResolution,
StandardConversionSequence &SCS,
bool CStyle) {
const AtomicType *ToAtomic = ToType->getAs<AtomicType>();
if (!ToAtomic)
return false;
StandardConversionSequence InnerSCS;
if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
InOverloadResolution, InnerSCS,
CStyle, /*AllowObjCWritebackConversion=*/false))
return false;
SCS.Second = InnerSCS.Second;
SCS.setToType(1, InnerSCS.getToType(1));
SCS.Third = InnerSCS.Third;
SCS.QualificationIncludesObjCLifetime
= InnerSCS.QualificationIncludesObjCLifetime;
SCS.setToType(2, InnerSCS.getToType(2));
return true;
}
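// A minimal illustration; _Atomic is C11, and Clang also accepts it in C++
// as an extension (hypothetical user code):
//
//   _Atomic(int) ai = 'x';   // the initializer undergoes char -> int first;
//                            // the scalar -> atomic step is spliced in here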
static bool isFirstArgumentCompatibleWithType(ASTContext &Context,
CXXConstructorDecl *Constructor,
QualType Type) {
const FunctionProtoType *CtorType =
Constructor->getType()->getAs<FunctionProtoType>();
if (CtorType->getNumParams() > 0) {
QualType FirstArg = CtorType->getParamType(0);
if (Context.hasSameUnqualifiedType(Type, FirstArg.getNonReferenceType()))
return true;
}
return false;
}
static OverloadingResult
IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
CXXRecordDecl *To,
UserDefinedConversionSequence &User,
OverloadCandidateSet &CandidateSet,
bool AllowExplicit) {
CandidateSet.clear(OverloadCandidateSet::CSK_InitByUserDefinedConversion);
for (auto *D : S.LookupConstructors(To)) {
auto Info = getConstructorInfo(D);
if (!Info)
continue;
bool Usable = !Info.Constructor->isInvalidDecl() &&
S.isInitListConstructor(Info.Constructor) &&
(AllowExplicit || !Info.Constructor->isExplicit());
if (Usable) {
// If the first argument is (a reference to) the target type,
// suppress conversions.
bool SuppressUserConversions = isFirstArgumentCompatibleWithType(
S.Context, Info.Constructor, ToType);
if (Info.ConstructorTmpl)
S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr, From,
CandidateSet, SuppressUserConversions);
else
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl, From,
CandidateSet, SuppressUserConversions);
}
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
OverloadCandidateSet::iterator Best;
switch (auto Result =
CandidateSet.BestViableFunction(S, From->getLocStart(),
Best)) {
case OR_Deleted:
case OR_Success: {
// Record the standard conversion we used and the conversion function.
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
QualType ThisType = Constructor->getThisType(S.Context);
// Initializer lists don't have conversions as such.
User.Before.setAsIdentityConversion();
User.HadMultipleCandidates = HadMultipleCandidates;
User.ConversionFunction = Constructor;
User.FoundConversionFunction = Best->FoundDecl;
User.After.setAsIdentityConversion();
User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
User.After.setAllToTypes(ToType);
return Result;
}
case OR_No_Viable_Function:
return OR_No_Viable_Function;
case OR_Ambiguous:
return OR_Ambiguous;
}
llvm_unreachable("Invalid OverloadResult!");
}
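// A minimal illustration (hypothetical user code):
//
//   #include <initializer_list>
//   struct S { S(std::initializer_list<int>); };
//   void f(S);
//   void call() { f({1, 2, 3}); }  // the braced list converts through the
//                                  // initializer-list constructor found here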
/// Determines whether there is a user-defined conversion sequence
/// (C++ [over.ics.user]) that converts expression From to the type
/// ToType. If such a conversion exists, User will contain the
/// user-defined conversion sequence that performs such a conversion
/// and this routine will return true. Otherwise, this routine returns
/// false and User is unspecified.
///
/// \param AllowExplicit true if the conversion should consider C++0x
/// "explicit" conversion functions as well as non-explicit conversion
/// functions (C++0x [class.conv.fct]p2).
///
/// \param AllowObjCConversionOnExplicit true if the conversion should
/// allow an extra Objective-C pointer conversion on uses of explicit
/// constructors. Requires \c AllowExplicit to also be set.
static OverloadingResult
IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
UserDefinedConversionSequence &User,
OverloadCandidateSet &CandidateSet,
bool AllowExplicit,
bool AllowObjCConversionOnExplicit) {
assert(AllowExplicit || !AllowObjCConversionOnExplicit);
CandidateSet.clear(OverloadCandidateSet::CSK_InitByUserDefinedConversion);
// Whether we will only visit constructors.
bool ConstructorsOnly = false;
// If the type we are converting to is a class type, enumerate its
// constructors.
if (const RecordType *ToRecordType = ToType->getAs<RecordType>()) {
// C++ [over.match.ctor]p1:
// When objects of class type are direct-initialized (8.5), or
// copy-initialized from an expression of the same or a
// derived class type (8.5), overload resolution selects the
// constructor. [...] For copy-initialization, the candidate
// functions are all the converting constructors (12.3.1) of
// that class. The argument list is the expression-list within
// the parentheses of the initializer.
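// For illustration (hypothetical type, not from this file):
//   struct X { X(int); };
//   X x = 42; // copy-initialization: the candidates are X's converting
//             // constructors, and 42 converts via X(int)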
if (S.Context.hasSameUnqualifiedType(ToType, From->getType()) ||
(From->getType()->getAs<RecordType>() &&
S.IsDerivedFrom(From->getLocStart(), From->getType(), ToType)))
ConstructorsOnly = true;
if (!S.isCompleteType(From->getExprLoc(), ToType)) {
// We're not going to find any constructors.
} else if (CXXRecordDecl *ToRecordDecl
= dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
Expr **Args = &From;
unsigned NumArgs = 1;
bool ListInitializing = false;
if (InitListExpr *InitList = dyn_cast<InitListExpr>(From)) {
// But first, see if there is an init-list-constructor that will work.
OverloadingResult Result = IsInitializerListConstructorConversion(
S, From, ToType, ToRecordDecl, User, CandidateSet, AllowExplicit);
if (Result != OR_No_Viable_Function)
return Result;
// Never mind.
CandidateSet.clear(
OverloadCandidateSet::CSK_InitByUserDefinedConversion);
// If we're list-initializing, we pass the individual elements as
// arguments, not the entire list.
Args = InitList->getInits();
NumArgs = InitList->getNumInits();
ListInitializing = true;
}
for (auto *D : S.LookupConstructors(ToRecordDecl)) {
auto Info = getConstructorInfo(D);
if (!Info)
continue;
bool Usable = !Info.Constructor->isInvalidDecl();
if (ListInitializing)
Usable = Usable && (AllowExplicit || !Info.Constructor->isExplicit());
else
Usable = Usable &&
Info.Constructor->isConvertingConstructor(AllowExplicit);
if (Usable) {
bool SuppressUserConversions = !ConstructorsOnly;
if (SuppressUserConversions && ListInitializing) {
SuppressUserConversions = false;
if (NumArgs == 1) {
// If the first argument is (a reference to) the target type,
// suppress conversions.
SuppressUserConversions = isFirstArgumentCompatibleWithType(
S.Context, Info.Constructor, ToType);
}
}
if (Info.ConstructorTmpl)
S.AddTemplateOverloadCandidate(
Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr, llvm::makeArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions);
else
// Allow one user-defined conversion when user specifies a
// From->ToType conversion via a static cast (C-style, etc.).
S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
llvm::makeArrayRef(Args, NumArgs),
CandidateSet, SuppressUserConversions);
}
}
}
}
// Enumerate conversion functions, if we're allowed to.
if (ConstructorsOnly || isa<InitListExpr>(From)) {
} else if (!S.isCompleteType(From->getLocStart(), From->getType())) {
// No conversion functions from incomplete types.
} else if (const RecordType *FromRecordType
= From->getType()->getAs<RecordType>()) {
if (CXXRecordDecl *FromRecordDecl
= dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
// Add all of the conversion functions as candidates.
const auto &Conversions = FromRecordDecl->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
DeclAccessPair FoundDecl = I.getPair();
NamedDecl *D = FoundDecl.getDecl();
CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
CXXConversionDecl *Conv;
FunctionTemplateDecl *ConvTemplate;
if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D)))
Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
Conv = cast<CXXConversionDecl>(D);
if (AllowExplicit || !Conv->isExplicit()) {
if (ConvTemplate)
S.AddTemplateConversionCandidate(ConvTemplate, FoundDecl,
ActingContext, From, ToType,
CandidateSet,
AllowObjCConversionOnExplicit);
else
S.AddConversionCandidate(Conv, FoundDecl, ActingContext,
From, ToType, CandidateSet,
AllowObjCConversionOnExplicit);
}
}
}
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
OverloadCandidateSet::iterator Best;
switch (auto Result = CandidateSet.BestViableFunction(S, From->getLocStart(),
Best)) {
case OR_Success:
case OR_Deleted:
// Record the standard conversion we used and the conversion function.
if (CXXConstructorDecl *Constructor
= dyn_cast<CXXConstructorDecl>(Best->Function)) {
// C++ [over.ics.user]p1:
// If the user-defined conversion is specified by a
// constructor (12.3.1), the initial standard conversion
// sequence converts the source type to the type required by
// the argument of the constructor.
//
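// For illustration (hypothetical type, not from this file):
//   struct Y { Y(long); };
//   Y y = 1; // Before: int -> long (standard conversion), then the
//            // constructor Y(long); After: identity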
QualType ThisType = Constructor->getThisType(S.Context);
if (isa<InitListExpr>(From)) {
// Initializer lists don't have conversions as such.
User.Before.setAsIdentityConversion();
} else {
if (Best->Conversions[0].isEllipsis())
User.EllipsisConversion = true;
else {
User.Before = Best->Conversions[0].Standard;
User.EllipsisConversion = false;
}
}
User.HadMultipleCandidates = HadMultipleCandidates;
User.ConversionFunction = Constructor;
User.FoundConversionFunction = Best->FoundDecl;
User.After.setAsIdentityConversion();
User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
User.After.setAllToTypes(ToType);
return Result;
}
if (CXXConversionDecl *Conversion
= dyn_cast<CXXConversionDecl>(Best->Function)) {
// C++ [over.ics.user]p1:
//
// [...] If the user-defined conversion is specified by a
// conversion function (12.3.2), the initial standard
// conversion sequence converts the source type to the
// implicit object parameter of the conversion function.
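// For illustration (hypothetical type, not from this file):
//   struct Z { operator int(); };
//   Z z;
//   long l = z; // Before: identity, binding z to the implicit object
//               // parameter of operator int; After: int -> long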
User.Before = Best->Conversions[0].Standard;
User.HadMultipleCandidates = HadMultipleCandidates;
User.ConversionFunction = Conversion;
User.FoundConversionFunction = Best->FoundDecl;
User.EllipsisConversion = false;
// C++ [over.ics.user]p2:
// The second standard conversion sequence converts the
// result of the user-defined conversion to the target type
// for the sequence. Since an implicit conversion sequence
// is an initialization, the special rules for
// initialization by user-defined conversion apply when
// selecting the best user-defined conversion for a
// user-defined conversion sequence (see 13.3.3 and
// 13.3.3.1).
User.After = Best->FinalConversion;
return Result;
}
llvm_unreachable("Not a constructor or conversion function?");
case OR_No_Viable_Function:
return OR_No_Viable_Function;
case OR_Ambiguous:
return OR_Ambiguous;
}
llvm_unreachable("Invalid OverloadResult!");
}
bool
Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
ImplicitConversionSequence ICS;
OverloadCandidateSet CandidateSet(From->getExprLoc(),
OverloadCandidateSet::CSK_Normal);
OverloadingResult OvResult =
IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
CandidateSet, false, false);
if (OvResult == OR_Ambiguous)
Diag(From->getLocStart(), diag::err_typecheck_ambiguous_condition)
<< From->getType() << ToType << From->getSourceRange();
else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty()) {
if (!RequireCompleteType(From->getLocStart(), ToType,
diag::err_typecheck_nonviable_condition_incomplete,
From->getType(), From->getSourceRange()))
Diag(From->getLocStart(), diag::err_typecheck_nonviable_condition)
<< false << From->getType() << From->getSourceRange() << ToType;
} else
return false;
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From);
return true;
}
/// Compare the user-defined conversion functions or constructors
/// of two user-defined conversion sequences to determine whether any ordering
/// is possible.
static ImplicitConversionSequence::CompareKind
compareConversionFunctions(Sema &S, FunctionDecl *Function1,
FunctionDecl *Function2) {
if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus11)
return ImplicitConversionSequence::Indistinguishable;
// Objective-C++:
// If both conversion functions are implicitly-declared conversions from
// a lambda closure type to a function pointer and a block pointer,
// respectively, always prefer the conversion to a function pointer,
// because the function pointer is more lightweight and is more likely
// to keep code working.
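// For illustration (hypothetical overloads, not from this file):
//   void f(void (*)()); // #1: function pointer
//   void f(void (^)()); // #2: block pointer
//   f([]{}); // prefers #1 under the rule above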
CXXConversionDecl *Conv1 = dyn_cast_or_null<CXXConversionDecl>(Function1);
if (!Conv1)
return ImplicitConversionSequence::Indistinguishable;
CXXConversionDecl *Conv2 = dyn_cast<CXXConversionDecl>(Function2);
if (!Conv2)
return ImplicitConversionSequence::Indistinguishable;
if (Conv1->getParent()->isLambda() && Conv2->getParent()->isLambda()) {
bool Block1 = Conv1->getConversionType()->isBlockPointerType();
bool Block2 = Conv2->getConversionType()->isBlockPointerType();
if (Block1 != Block2)
return Block1 ? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
return ImplicitConversionSequence::Indistinguishable;
}
static bool hasDeprecatedStringLiteralToCharPtrConversion(
const ImplicitConversionSequence &ICS) {
return (ICS.isStandard() && ICS.Standard.DeprecatedStringLiteralToCharPtr) ||
(ICS.isUserDefined() &&
ICS.UserDefined.Before.DeprecatedStringLiteralToCharPtr);
}
/// CompareImplicitConversionSequences - Compare two implicit
/// conversion sequences to determine whether one is better than the
/// other or if they are indistinguishable (C++ 13.3.3.2).
static ImplicitConversionSequence::CompareKind
CompareImplicitConversionSequences(Sema &S, SourceLocation Loc,
const ImplicitConversionSequence& ICS1,
const ImplicitConversionSequence& ICS2)
{
// (C++ 13.3.3.2p2): When comparing the basic forms of implicit
// conversion sequences (as defined in 13.3.3.1)
// -- a standard conversion sequence (13.3.3.1.1) is a better
// conversion sequence than a user-defined conversion sequence or
// an ellipsis conversion sequence, and
// -- a user-defined conversion sequence (13.3.3.1.2) is a better
// conversion sequence than an ellipsis conversion sequence
// (13.3.3.1.3).
//
// C++0x [over.best.ics]p10:
// For the purpose of ranking implicit conversion sequences as
// described in 13.3.3.2, the ambiguous conversion sequence is
// treated as a user-defined sequence that is indistinguishable
// from any other user-defined conversion sequence.
// String literal to 'char *' conversion has been deprecated in C++03. It has
// been removed from C++11. We still accept this conversion, if it happens at
// the best viable function. Otherwise, this conversion is considered worse
// than ellipsis conversion. Consider this as an extension; this is not in the
// standard. For example:
//
// int &f(...); // #1
// void f(char*); // #2
// void g() { int &r = f("foo"); }
//
// In C++03, we pick #2 as the best viable function.
// In C++11, we pick #1 as the best viable function, because ellipsis
// conversion is better than string-literal to char* conversion (since there
// is no such conversion in C++11). If there was no #1 at all or #1 couldn't
// convert arguments, #2 would be the best viable function in C++11.
// If the best viable function has this conversion, a warning will be issued
// in C++03, or an ExtWarn (+SFINAE failure) will be issued in C++11.
if (S.getLangOpts().CPlusPlus11 && !S.getLangOpts().WritableStrings &&
hasDeprecatedStringLiteralToCharPtrConversion(ICS1) !=
hasDeprecatedStringLiteralToCharPtrConversion(ICS2))
return hasDeprecatedStringLiteralToCharPtrConversion(ICS1)
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
if (ICS1.getKindRank() < ICS2.getKindRank())
return ImplicitConversionSequence::Better;
if (ICS2.getKindRank() < ICS1.getKindRank())
return ImplicitConversionSequence::Worse;
// The following checks require both conversion sequences to be of
// the same kind.
if (ICS1.getKind() != ICS2.getKind())
return ImplicitConversionSequence::Indistinguishable;
ImplicitConversionSequence::CompareKind Result =
ImplicitConversionSequence::Indistinguishable;
// Two implicit conversion sequences of the same form are
// indistinguishable conversion sequences unless one of the
// following rules applies (C++ 13.3.3.2p3):
// List-initialization sequence L1 is a better conversion sequence than
// list-initialization sequence L2 if:
// - L1 converts to std::initializer_list<X> for some X and L2 does not, or,
// if not that,
// - L1 converts to type "array of N1 T", L2 converts to type "array of N2 T",
// and N1 is smaller than N2,
// even if one of the other rules in this paragraph would otherwise apply.
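// For illustration (hypothetical overloads, not from this file):
//   void f(std::initializer_list<int>); // #1
//   void f(int);                        // #2
//   f({1}); // calls #1: L1 converts to std::initializer_list<int>,
//           // L2 does not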
if (!ICS1.isBad()) {
if (ICS1.isStdInitializerListElement() &&
!ICS2.isStdInitializerListElement())
return ImplicitConversionSequence::Better;
if (!ICS1.isStdInitializerListElement() &&
ICS2.isStdInitializerListElement())
return ImplicitConversionSequence::Worse;
}
if (ICS1.isStandard())
// Standard conversion sequence S1 is a better conversion sequence than
// standard conversion sequence S2 if [...]
Result = CompareStandardConversionSequences(S, Loc,
ICS1.Standard, ICS2.Standard);
else if (ICS1.isUserDefined()) {
// User-defined conversion sequence U1 is a better conversion
// sequence than another user-defined conversion sequence U2 if
// they contain the same user-defined conversion function or
// constructor and if the second standard conversion sequence of
// U1 is better than the second standard conversion sequence of
// U2 (C++ 13.3.3.2p3).
if (ICS1.UserDefined.ConversionFunction ==
ICS2.UserDefined.ConversionFunction)
Result = CompareStandardConversionSequences(S, Loc,
ICS1.UserDefined.After,
ICS2.UserDefined.After);
else
Result = compareConversionFunctions(S,
ICS1.UserDefined.ConversionFunction,
ICS2.UserDefined.ConversionFunction);
}
return Result;
}
// Per 13.3.3.2p3, compare the given standard conversion sequences to
// determine if one is a proper subset of the other.
static ImplicitConversionSequence::CompareKind
compareStandardConversionSubsets(ASTContext &Context,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2) {
ImplicitConversionSequence::CompareKind Result
= ImplicitConversionSequence::Indistinguishable;
// the identity conversion sequence is considered to be a subsequence of
// any non-identity conversion sequence
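// For illustration (hypothetical overloads, not from this file):
//   void f(int*);       // #1: identity conversion
//   void f(const int*); // #2: qualification conversion
//   void g(int *p) { f(p); } // calls #1: the identity sequence is a
//                            // proper subsequence of #2's sequence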
if (SCS1.isIdentityConversion() && !SCS2.isIdentityConversion())
return ImplicitConversionSequence::Better;
else if (!SCS1.isIdentityConversion() && SCS2.isIdentityConversion())
return ImplicitConversionSequence::Worse;
if (SCS1.Second != SCS2.Second) {
if (SCS1.Second == ICK_Identity)
Result = ImplicitConversionSequence::Better;
else if (SCS2.Second == ICK_Identity)
Result = ImplicitConversionSequence::Worse;
else
return ImplicitConversionSequence::Indistinguishable;
} else if (!Context.hasSimilarType(SCS1.getToType(1), SCS2.getToType(1)))
return ImplicitConversionSequence::Indistinguishable;
if (SCS1.Third == SCS2.Third) {
return Context.hasSameType(SCS1.getToType(2), SCS2.getToType(2))? Result
: ImplicitConversionSequence::Indistinguishable;
}
if (SCS1.Third == ICK_Identity)
return Result == ImplicitConversionSequence::Worse
? ImplicitConversionSequence::Indistinguishable
: ImplicitConversionSequence::Better;
if (SCS2.Third == ICK_Identity)
return Result == ImplicitConversionSequence::Better
? ImplicitConversionSequence::Indistinguishable
: ImplicitConversionSequence::Worse;
return ImplicitConversionSequence::Indistinguishable;
}
/// Determine whether one of the given reference bindings is better
/// than the other based on what kind of bindings they are.
static bool
isBetterReferenceBindingKind(const StandardConversionSequence &SCS1,
const StandardConversionSequence &SCS2) {
// C++0x [over.ics.rank]p3b4:
// -- S1 and S2 are reference bindings (8.5.3) and neither refers to an
// implicit object parameter of a non-static member function declared
// without a ref-qualifier, and *either* S1 binds an rvalue reference
// to an rvalue and S2 binds an lvalue reference *or S1 binds an
// lvalue reference to a function lvalue and S2 binds an rvalue
// reference*.
//
// FIXME: Rvalue references. We're going rogue with the above edits,
// because the semantics in the current C++0x working paper (N3225 at the
// time of this writing) break the standard definition of std::forward
// and std::reference_wrapper when dealing with references to functions.
// Proposed wording changes submitted to CWG for consideration.
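// For illustration (hypothetical overloads, not from this file):
//   void f(int&&);      // #1: binds an rvalue reference to an rvalue
//   void f(const int&); // #2: binds an lvalue reference
//   f(42); // calls #1 under the rule quoted above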
if (SCS1.BindsImplicitObjectArgumentWithoutRefQualifier ||
SCS2.BindsImplicitObjectArgumentWithoutRefQualifier)
return false;
return (!SCS1.IsLvalueReference && SCS1.BindsToRvalue &&
SCS2.IsLvalueReference) ||
(SCS1.IsLvalueReference && SCS1.BindsToFunctionLvalue &&
!SCS2.IsLvalueReference && SCS2.BindsToFunctionLvalue);
}
/// CompareStandardConversionSequences - Compare two standard
/// conversion sequences to determine whether one is better than the
/// other or if they are indistinguishable (C++ 13.3.3.2p3).
static ImplicitConversionSequence::CompareKind
CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2)
{
// Standard conversion sequence S1 is a better conversion sequence
// than standard conversion sequence S2 if (C++ 13.3.3.2p3):
// -- S1 is a proper subsequence of S2 (comparing the conversion
// sequences in the canonical form defined by 13.3.3.1.1,
// excluding any Lvalue Transformation; the identity conversion
// sequence is considered to be a subsequence of any
// non-identity conversion sequence) or, if not that,
if (ImplicitConversionSequence::CompareKind CK
= compareStandardConversionSubsets(S.Context, SCS1, SCS2))
return CK;
// -- the rank of S1 is better than the rank of S2 (by the rules
// defined below), or, if not that,
ImplicitConversionRank Rank1 = SCS1.getRank();
ImplicitConversionRank Rank2 = SCS2.getRank();
if (Rank1 < Rank2)
return ImplicitConversionSequence::Better;
else if (Rank2 < Rank1)
return ImplicitConversionSequence::Worse;
// (C++ 13.3.3.2p4): Two conversion sequences with the same rank
// are indistinguishable unless one of the following rules
// applies:
// A conversion that is not a conversion of a pointer, or
// pointer to member, to bool is better than another conversion
// that is such a conversion.
if (SCS1.isPointerConversionToBool() != SCS2.isPointerConversionToBool())
return SCS2.isPointerConversionToBool()
? ImplicitConversionSequence::Better
: ImplicitConversionSequence::Worse;
// C++ [over.ics.rank]p4b2:
//
// If class B is derived directly or indirectly from class A,
// conversion of B* to A* is better than conversion of B* to
// void*, and conversion of A* to void* is better than conversion
// of B* to void*.
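// For illustration (hypothetical types, not from this file):
//   struct A {}; struct B : A {};
//   void f(A*);    // #1
//   void f(void*); // #2
//   void g(B *bp) { f(bp); } // calls #1: B* -> A* beats B* -> void*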
bool SCS1ConvertsToVoid
= SCS1.isPointerConversionToVoidPointer(S.Context);
bool SCS2ConvertsToVoid
= SCS2.isPointerConversionToVoidPointer(S.Context);
if (SCS1ConvertsToVoid != SCS2ConvertsToVoid) {
// Exactly one of the conversion sequences is a conversion to
// a void pointer; it's the worse conversion.
return SCS2ConvertsToVoid ? ImplicitConversionSequence::Better
: ImplicitConversionSequence::Worse;
} else if (!SCS1ConvertsToVoid && !SCS2ConvertsToVoid) {
// Neither conversion sequence converts to a void pointer; compare
// their derived-to-base conversions.
if (ImplicitConversionSequence::CompareKind DerivedCK
= CompareDerivedToBaseConversions(S, Loc, SCS1, SCS2))
return DerivedCK;
} else if (SCS1ConvertsToVoid && SCS2ConvertsToVoid &&
!S.Context.hasSameType(SCS1.getFromType(), SCS2.getFromType())) {
// Both conversion sequences are conversions to void
// pointers. Compare the source types to determine if there's an
// inheritance relationship in their sources.
QualType FromType1 = SCS1.getFromType();
QualType FromType2 = SCS2.getFromType();
// Adjust the types we're converting from via the array-to-pointer
// conversion, if we need to.
if (SCS1.First == ICK_Array_To_Pointer)
FromType1 = S.Context.getArrayDecayedType(FromType1);
if (SCS2.First == ICK_Array_To_Pointer)
FromType2 = S.Context.getArrayDecayedType(FromType2);
QualType FromPointee1 = FromType1->getPointeeType().getUnqualifiedType();
QualType FromPointee2 = FromType2->getPointeeType().getUnqualifiedType();
if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
return ImplicitConversionSequence::Worse;
// Objective-C++: If one interface is more specific than the
// other, it is the better one.
const ObjCObjectPointerType* FromObjCPtr1
= FromType1->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType* FromObjCPtr2
= FromType2->getAs<ObjCObjectPointerType>();
if (FromObjCPtr1 && FromObjCPtr2) {
bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1,
FromObjCPtr2);
bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2,
FromObjCPtr1);
if (AssignLeft != AssignRight) {
return AssignLeft? ImplicitConversionSequence::Better
: ImplicitConversionSequence::Worse;
}
}
}
// Compare based on qualification conversions (C++ 13.3.3.2p3,
// bullet 3).
if (ImplicitConversionSequence::CompareKind QualCK
= CompareQualificationConversions(S, SCS1, SCS2))
return QualCK;
if (SCS1.ReferenceBinding && SCS2.ReferenceBinding) {
// Check for a better reference binding based on the kind of bindings.
if (isBetterReferenceBindingKind(SCS1, SCS2))
return ImplicitConversionSequence::Better;
else if (isBetterReferenceBindingKind(SCS2, SCS1))
return ImplicitConversionSequence::Worse;
// C++ [over.ics.rank]p3b4:
// -- S1 and S2 are reference bindings (8.5.3), and the types to
// which the references refer are the same type except for
// top-level cv-qualifiers, and the type to which the reference
// initialized by S2 refers is more cv-qualified than the type
// to which the reference initialized by S1 refers.
QualType T1 = SCS1.getToType(2);
QualType T2 = SCS2.getToType(2);
T1 = S.Context.getCanonicalType(T1);
T2 = S.Context.getCanonicalType(T2);
Qualifiers T1Quals, T2Quals;
QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
if (UnqualT1 == UnqualT2) {
// Objective-C++ ARC: If the references refer to objects with different
// lifetimes, prefer bindings that don't change lifetime.
if (SCS1.ObjCLifetimeConversionBinding !=
SCS2.ObjCLifetimeConversionBinding) {
return SCS1.ObjCLifetimeConversionBinding
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
// If the type is an array type, promote the element qualifiers to the
// type for comparison.
if (isa<ArrayType>(T1) && T1Quals)
T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
if (isa<ArrayType>(T2) && T2Quals)
T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);
if (T2.isMoreQualifiedThan(T1))
return ImplicitConversionSequence::Better;
else if (T1.isMoreQualifiedThan(T2))
return ImplicitConversionSequence::Worse;
}
}
// In Microsoft mode, prefer an integral conversion to a
// floating-to-integral conversion if the integral conversion
// is between types of the same size.
// For example:
// void f(float);
// void f(int);
// int main() {
// long a;
// f(a);
// }
// Here, MSVC will call f(int) instead of generating a compile error
// as clang will do in standard mode.
if (S.getLangOpts().MSVCCompat && SCS1.Second == ICK_Integral_Conversion &&
SCS2.Second == ICK_Floating_Integral &&
S.Context.getTypeSize(SCS1.getFromType()) ==
S.Context.getTypeSize(SCS1.getToType(2)))
return ImplicitConversionSequence::Better;
return ImplicitConversionSequence::Indistinguishable;
}
/// CompareQualificationConversions - Compares two standard conversion
/// sequences to determine whether they can be ranked based on their
/// qualification conversions (C++ 13.3.3.2p3 bullet 3).
static ImplicitConversionSequence::CompareKind
CompareQualificationConversions(Sema &S,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2) {
// C++ 13.3.3.2p3:
// -- S1 and S2 differ only in their qualification conversion and
// yield similar types T1 and T2 (C++ 4.4), respectively, and the
// cv-qualification signature of type T1 is a proper subset of
// the cv-qualification signature of type T2, and S1 is not the
// deprecated string literal array-to-pointer conversion (4.2).
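// For illustration (hypothetical overloads, not from this file):
//   void f(const int*);          // #1
//   void f(const volatile int*); // #2
//   void g(int *p) { f(p); } // calls #1: {const} is a proper subset of
//                            // {const, volatile}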
if (SCS1.First != SCS2.First || SCS1.Second != SCS2.Second ||
SCS1.Third != SCS2.Third || SCS1.Third != ICK_Qualification)
return ImplicitConversionSequence::Indistinguishable;
// FIXME: the example in the standard doesn't use a qualification
// conversion (!)
QualType T1 = SCS1.getToType(2);
QualType T2 = SCS2.getToType(2);
T1 = S.Context.getCanonicalType(T1);
T2 = S.Context.getCanonicalType(T2);
Qualifiers T1Quals, T2Quals;
QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
// If the types are the same, we won't learn anything by unwrapping
// them.
if (UnqualT1 == UnqualT2)
return ImplicitConversionSequence::Indistinguishable;
// If the type is an array type, promote the element qualifiers to the type
// for comparison.
if (isa<ArrayType>(T1) && T1Quals)
T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
if (isa<ArrayType>(T2) && T2Quals)
T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);
ImplicitConversionSequence::CompareKind Result
= ImplicitConversionSequence::Indistinguishable;
// Objective-C++ ARC:
// Prefer qualification conversions not involving a change in lifetime
// to qualification conversions that do change lifetime.
if (SCS1.QualificationIncludesObjCLifetime !=
SCS2.QualificationIncludesObjCLifetime) {
Result = SCS1.QualificationIncludesObjCLifetime
? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
while (S.Context.UnwrapSimilarTypes(T1, T2)) {
// Within each iteration of the loop, we check the qualifiers to
// determine if this still looks like a qualification
// conversion. Then, if all is well, we unwrap one more level of
// pointers or pointers-to-members and do it all again
// until there are no more pointers or pointers-to-members left
// to unwrap. This essentially mimics what
// IsQualificationConversion does, but here we're checking for a
// strict subset of qualifiers.
if (T1.getCVRQualifiers() == T2.getCVRQualifiers())
// The qualifiers are the same, so this doesn't tell us anything
// about how the sequences rank.
;
else if (T2.isMoreQualifiedThan(T1)) {
// T1 has fewer qualifiers, so it could be the better sequence.
if (Result == ImplicitConversionSequence::Worse)
// Neither has qualifiers that are a subset of the other's
// qualifiers.
return ImplicitConversionSequence::Indistinguishable;
Result = ImplicitConversionSequence::Better;
} else if (T1.isMoreQualifiedThan(T2)) {
// T2 has fewer qualifiers, so it could be the better sequence.
if (Result == ImplicitConversionSequence::Better)
// Neither has qualifiers that are a subset of the other's
// qualifiers.
return ImplicitConversionSequence::Indistinguishable;
Result = ImplicitConversionSequence::Worse;
} else {
// Qualifiers are disjoint.
return ImplicitConversionSequence::Indistinguishable;
}
// If the types after this point are equivalent, we're done.
if (S.Context.hasSameUnqualifiedType(T1, T2))
break;
}
// Check that the winning standard conversion sequence isn't using
// the deprecated string literal array to pointer conversion.
switch (Result) {
case ImplicitConversionSequence::Better:
if (SCS1.DeprecatedStringLiteralToCharPtr)
Result = ImplicitConversionSequence::Indistinguishable;
break;
case ImplicitConversionSequence::Indistinguishable:
break;
case ImplicitConversionSequence::Worse:
if (SCS2.DeprecatedStringLiteralToCharPtr)
Result = ImplicitConversionSequence::Indistinguishable;
break;
}
return Result;
}
/// CompareDerivedToBaseConversions - Compares two standard conversion
/// sequences to determine whether they can be ranked based on their
/// various kinds of derived-to-base conversions (C++
/// [over.ics.rank]p4b3). As part of these checks, we also look at
/// conversions between Objective-C interface types.
static ImplicitConversionSequence::CompareKind
CompareDerivedToBaseConversions(Sema &S, SourceLocation Loc,
const StandardConversionSequence& SCS1,
const StandardConversionSequence& SCS2) {
QualType FromType1 = SCS1.getFromType();
QualType ToType1 = SCS1.getToType(1);
QualType FromType2 = SCS2.getFromType();
QualType ToType2 = SCS2.getToType(1);
// Adjust the types we're converting from via the array-to-pointer
// conversion, if we need to.
if (SCS1.First == ICK_Array_To_Pointer)
FromType1 = S.Context.getArrayDecayedType(FromType1);
if (SCS2.First == ICK_Array_To_Pointer)
FromType2 = S.Context.getArrayDecayedType(FromType2);
// Canonicalize all of the types.
FromType1 = S.Context.getCanonicalType(FromType1);
ToType1 = S.Context.getCanonicalType(ToType1);
FromType2 = S.Context.getCanonicalType(FromType2);
ToType2 = S.Context.getCanonicalType(ToType2);
// C++ [over.ics.rank]p4b3:
//
// If class B is derived directly or indirectly from class A and
// class C is derived directly or indirectly from B,
//
// Compare based on pointer conversions.
if (SCS1.Second == ICK_Pointer_Conversion &&
SCS2.Second == ICK_Pointer_Conversion &&
/*FIXME: Remove if Objective-C id conversions get their own rank*/
FromType1->isPointerType() && FromType2->isPointerType() &&
ToType1->isPointerType() && ToType2->isPointerType()) {
QualType FromPointee1
= FromType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
QualType ToPointee1
= ToType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
QualType FromPointee2
= FromType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
QualType ToPointee2
= ToType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
// -- conversion of C* to B* is better than conversion of C* to A*,
if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
if (S.IsDerivedFrom(Loc, ToPointee1, ToPointee2))
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(Loc, ToPointee2, ToPointee1))
return ImplicitConversionSequence::Worse;
}
// -- conversion of B* to A* is better than conversion of C* to A*,
if (FromPointee1 != FromPointee2 && ToPointee1 == ToPointee2) {
if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
return ImplicitConversionSequence::Worse;
}
} else if (SCS1.Second == ICK_Pointer_Conversion &&
SCS2.Second == ICK_Pointer_Conversion) {
const ObjCObjectPointerType *FromPtr1
= FromType1->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType *FromPtr2
= FromType2->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType *ToPtr1
= ToType1->getAs<ObjCObjectPointerType>();
const ObjCObjectPointerType *ToPtr2
= ToType2->getAs<ObjCObjectPointerType>();
if (FromPtr1 && FromPtr2 && ToPtr1 && ToPtr2) {
// Apply the same conversion ranking rules for Objective-C pointer types
// that we do for C++ pointers to class types. However, we employ the
// Objective-C pseudo-subtyping relationship used for assignment of
// Objective-C pointer types.
bool FromAssignLeft
= S.Context.canAssignObjCInterfaces(FromPtr1, FromPtr2);
bool FromAssignRight
= S.Context.canAssignObjCInterfaces(FromPtr2, FromPtr1);
bool ToAssignLeft
= S.Context.canAssignObjCInterfaces(ToPtr1, ToPtr2);
bool ToAssignRight
= S.Context.canAssignObjCInterfaces(ToPtr2, ToPtr1);
// A conversion to a non-id object pointer type or qualified 'id'
// type is better than a conversion to 'id'.
if (ToPtr1->isObjCIdType() &&
(ToPtr2->isObjCQualifiedIdType() || ToPtr2->getInterfaceDecl()))
return ImplicitConversionSequence::Worse;
if (ToPtr2->isObjCIdType() &&
(ToPtr1->isObjCQualifiedIdType() || ToPtr1->getInterfaceDecl()))
return ImplicitConversionSequence::Better;
// A conversion to a non-id object pointer type is better than a
// conversion to a qualified 'id' type
if (ToPtr1->isObjCQualifiedIdType() && ToPtr2->getInterfaceDecl())
return ImplicitConversionSequence::Worse;
if (ToPtr2->isObjCQualifiedIdType() && ToPtr1->getInterfaceDecl())
return ImplicitConversionSequence::Better;
// A conversion to a non-Class object pointer type or qualified 'Class'
// type is better than a conversion to 'Class'.
if (ToPtr1->isObjCClassType() &&
(ToPtr2->isObjCQualifiedClassType() || ToPtr2->getInterfaceDecl()))
return ImplicitConversionSequence::Worse;
if (ToPtr2->isObjCClassType() &&
(ToPtr1->isObjCQualifiedClassType() || ToPtr1->getInterfaceDecl()))
return ImplicitConversionSequence::Better;
// A conversion to a non-Class object pointer type is better than a
// conversion to a qualified 'Class' type.
if (ToPtr1->isObjCQualifiedClassType() && ToPtr2->getInterfaceDecl())
return ImplicitConversionSequence::Worse;
if (ToPtr2->isObjCQualifiedClassType() && ToPtr1->getInterfaceDecl())
return ImplicitConversionSequence::Better;
// -- "conversion of C* to B* is better than conversion of C* to A*,"
if (S.Context.hasSameType(FromType1, FromType2) &&
!FromPtr1->isObjCIdType() && !FromPtr1->isObjCClassType() &&
(ToAssignLeft != ToAssignRight)) {
if (FromPtr1->isSpecialized()) {
// "conversion of B<A> * to B * is better than conversion of B * to
// C *.
bool IsFirstSame =
FromPtr1->getInterfaceDecl() == ToPtr1->getInterfaceDecl();
bool IsSecondSame =
FromPtr1->getInterfaceDecl() == ToPtr2->getInterfaceDecl();
if (IsFirstSame) {
if (!IsSecondSame)
return ImplicitConversionSequence::Better;
} else if (IsSecondSame)
return ImplicitConversionSequence::Worse;
}
return ToAssignLeft? ImplicitConversionSequence::Worse
: ImplicitConversionSequence::Better;
}
// -- "conversion of B* to A* is better than conversion of C* to A*,"
if (S.Context.hasSameUnqualifiedType(ToType1, ToType2) &&
(FromAssignLeft != FromAssignRight))
return FromAssignLeft? ImplicitConversionSequence::Better
: ImplicitConversionSequence::Worse;
}
}
// Ranking of member-pointer types.
if (SCS1.Second == ICK_Pointer_Member && SCS2.Second == ICK_Pointer_Member &&
FromType1->isMemberPointerType() && FromType2->isMemberPointerType() &&
ToType1->isMemberPointerType() && ToType2->isMemberPointerType()) {
const MemberPointerType * FromMemPointer1 =
FromType1->getAs<MemberPointerType>();
const MemberPointerType * ToMemPointer1 =
ToType1->getAs<MemberPointerType>();
const MemberPointerType * FromMemPointer2 =
FromType2->getAs<MemberPointerType>();
const MemberPointerType * ToMemPointer2 =
ToType2->getAs<MemberPointerType>();
const Type *FromPointeeType1 = FromMemPointer1->getClass();
const Type *ToPointeeType1 = ToMemPointer1->getClass();
const Type *FromPointeeType2 = FromMemPointer2->getClass();
const Type *ToPointeeType2 = ToMemPointer2->getClass();
QualType FromPointee1 = QualType(FromPointeeType1, 0).getUnqualifiedType();
QualType ToPointee1 = QualType(ToPointeeType1, 0).getUnqualifiedType();
QualType FromPointee2 = QualType(FromPointeeType2, 0).getUnqualifiedType();
QualType ToPointee2 = QualType(ToPointeeType2, 0).getUnqualifiedType();
// conversion of A::* to B::* is better than conversion of A::* to C::*,
if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
if (S.IsDerivedFrom(Loc, ToPointee1, ToPointee2))
return ImplicitConversionSequence::Worse;
else if (S.IsDerivedFrom(Loc, ToPointee2, ToPointee1))
return ImplicitConversionSequence::Better;
}
// conversion of B::* to C::* is better than conversion of A::* to C::*
if (ToPointee1 == ToPointee2 && FromPointee1 != FromPointee2) {
if (S.IsDerivedFrom(Loc, FromPointee1, FromPointee2))
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(Loc, FromPointee2, FromPointee1))
return ImplicitConversionSequence::Worse;
}
}
if (SCS1.Second == ICK_Derived_To_Base) {
// -- conversion of C to B is better than conversion of C to A,
// -- binding of an expression of type C to a reference of type
// B& is better than binding an expression of type C to a
// reference of type A&,
if (S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
!S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
if (S.IsDerivedFrom(Loc, ToType1, ToType2))
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(Loc, ToType2, ToType1))
return ImplicitConversionSequence::Worse;
}
// -- conversion of B to A is better than conversion of C to A.
// -- binding of an expression of type B to a reference of type
// A& is better than binding an expression of type C to a
// reference of type A&,
if (!S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
if (S.IsDerivedFrom(Loc, FromType2, FromType1))
return ImplicitConversionSequence::Better;
else if (S.IsDerivedFrom(Loc, FromType1, FromType2))
return ImplicitConversionSequence::Worse;
}
}
return ImplicitConversionSequence::Indistinguishable;
}
/// Determine whether the given type is valid, e.g., it is not an invalid
/// C++ class.
static bool isTypeValid(QualType T) {
if (CXXRecordDecl *Record = T->getAsCXXRecordDecl())
return !Record->isInvalidDecl();
return true;
}
/// CompareReferenceRelationship - Compare the two types T1 and T2 to
/// determine whether they are reference-related,
/// reference-compatible, reference-compatible with added
/// qualification, or incompatible, for use in C++ initialization by
/// reference (C++ [dcl.ref.init]p4). Neither type can be a reference
/// type, and the first type (T1) is the pointee type of the reference
/// type being initialized.
Sema::ReferenceCompareResult
Sema::CompareReferenceRelationship(SourceLocation Loc,
QualType OrigT1, QualType OrigT2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion) {
assert(!OrigT1->isReferenceType() &&
"T1 must be the pointee type of the reference type");
assert(!OrigT2->isReferenceType() && "T2 cannot be a reference type");
QualType T1 = Context.getCanonicalType(OrigT1);
QualType T2 = Context.getCanonicalType(OrigT2);
Qualifiers T1Quals, T2Quals;
QualType UnqualT1 = Context.getUnqualifiedArrayType(T1, T1Quals);
QualType UnqualT2 = Context.getUnqualifiedArrayType(T2, T2Quals);
// C++ [dcl.init.ref]p4:
// Given types "cv1 T1" and "cv2 T2," "cv1 T1" is
// reference-related to "cv2 T2" if T1 is the same type as T2, or
// T1 is a base class of T2.
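// For illustration (hypothetical types, not from this file):
//   struct A {}; struct B : A {};
//   // "const A" is reference-related to "B" because A is a base of B:
//   const A &ra = B(); // binds to the A subobject of the temporary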
DerivedToBase = false;
ObjCConversion = false;
ObjCLifetimeConversion = false;
QualType ConvertedT2;
if (UnqualT1 == UnqualT2) {
// Nothing to do.
} else if (isCompleteType(Loc, OrigT2) &&
isTypeValid(UnqualT1) && isTypeValid(UnqualT2) &&
IsDerivedFrom(Loc, UnqualT2, UnqualT1))
DerivedToBase = true;
else if (UnqualT1->isObjCObjectOrInterfaceType() &&
UnqualT2->isObjCObjectOrInterfaceType() &&
Context.canBindObjCObjectType(UnqualT1, UnqualT2))
ObjCConversion = true;
else if (UnqualT2->isFunctionType() &&
IsFunctionConversion(UnqualT2, UnqualT1, ConvertedT2))
// C++1z [dcl.init.ref]p4:
// cv1 T1" is reference-compatible with "cv2 T2" if [...] T2 is "noexcept
// function" and T1 is "function"
//
// We extend this to also apply to 'noreturn', so allow any function
// conversion between function types.
return Ref_Compatible;
else
return Ref_Incompatible;
// At this point, we know that T1 and T2 are reference-related (at
// least).
// If the type is an array type, promote the element qualifiers to the type
// for comparison.
if (isa<ArrayType>(T1) && T1Quals)
T1 = Context.getQualifiedType(UnqualT1, T1Quals);
if (isa<ArrayType>(T2) && T2Quals)
T2 = Context.getQualifiedType(UnqualT2, T2Quals);
// C++ [dcl.init.ref]p4:
// "cv1 T1" is reference-compatible with "cv2 T2" if T1 is
// reference-related to T2 and cv1 is the same cv-qualification
// as, or greater cv-qualification than, cv2. For purposes of
// overload resolution, cases for which cv1 is greater
// cv-qualification than cv2 are identified as
// reference-compatible with added qualification (see 13.3.3.2).
//
// Note that we also require equivalence of Objective-C GC and address-space
// qualifiers when performing these computations, so that e.g., an int in
// address space 1 is not reference-compatible with an int in address
// space 2.
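// For illustration (hypothetical snippet, not from this file):
//   int i = 0;
//   const int &r = i; // "const int" is reference-compatible with "int",
//                     // with added qualification (cv1 > cv2)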
if (T1Quals.getObjCLifetime() != T2Quals.getObjCLifetime() &&
T1Quals.compatiblyIncludesObjCLifetime(T2Quals)) {
if (isNonTrivialObjCLifetimeConversion(T2Quals, T1Quals))
ObjCLifetimeConversion = true;
T1Quals.removeObjCLifetime();
T2Quals.removeObjCLifetime();
}
// MS compiler ignores __unaligned qualifier for references; do the same.
T1Quals.removeUnaligned();
T2Quals.removeUnaligned();
if (T1Quals.compatiblyIncludes(T2Quals))
return Ref_Compatible;
else
return Ref_Related;
}
/// Look for a user-defined conversion to a value reference-compatible
/// with DeclType. Return true if something definite is found.
static bool
FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
QualType DeclType, SourceLocation DeclLoc,
Expr *Init, QualType T2, bool AllowRvalues,
bool AllowExplicit) {
assert(T2->isRecordType() && "Can only find conversions of record types.");
CXXRecordDecl *T2RecordDecl
= dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl());
OverloadCandidateSet CandidateSet(
DeclLoc, OverloadCandidateSet::CSK_InitByUserDefinedConversion);
const auto &Conversions = T2RecordDecl->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
NamedDecl *D = *I;
CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
FunctionTemplateDecl *ConvTemplate
= dyn_cast<FunctionTemplateDecl>(D);
CXXConversionDecl *Conv;
if (ConvTemplate)
Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
Conv = cast<CXXConversionDecl>(D);
// If this is an explicit conversion, and we're not allowed to consider
// explicit conversions, skip it.
if (!AllowExplicit && Conv->isExplicit())
continue;
if (AllowRvalues) {
bool DerivedToBase = false;
bool ObjCConversion = false;
bool ObjCLifetimeConversion = false;
// If we are initializing an rvalue reference, don't permit conversion
// functions that return lvalues.
if (!ConvTemplate && DeclType->isRValueReferenceType()) {
const ReferenceType *RefType
= Conv->getConversionType()->getAs<LValueReferenceType>();
if (RefType && !RefType->getPointeeType()->isFunctionType())
continue;
}
if (!ConvTemplate &&
S.CompareReferenceRelationship(
DeclLoc,
Conv->getConversionType().getNonReferenceType()
.getUnqualifiedType(),
DeclType.getNonReferenceType().getUnqualifiedType(),
DerivedToBase, ObjCConversion, ObjCLifetimeConversion) ==
Sema::Ref_Incompatible)
continue;
} else {
// If the conversion function doesn't return a reference type,
// it can't be considered for this conversion. An rvalue reference
// is only acceptable if its referencee is a function type.
const ReferenceType *RefType =
Conv->getConversionType()->getAs<ReferenceType>();
if (!RefType ||
(!RefType->isLValueReferenceType() &&
!RefType->getPointeeType()->isFunctionType()))
continue;
}
if (ConvTemplate)
S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
Init, DeclType, CandidateSet,
/*AllowObjCConversionOnExplicit=*/false);
else
S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
DeclType, CandidateSet,
/*AllowObjCConversionOnExplicit=*/false);
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(S, DeclLoc, Best)) {
case OR_Success:
// C++ [over.ics.ref]p1:
//
// [...] If the parameter binds directly to the result of
// applying a conversion function to the argument
// expression, the implicit conversion sequence is a
// user-defined conversion sequence (13.3.3.1.2), with the
// second standard conversion sequence either an identity
// conversion or, if the conversion function returns an
// entity of a type that is a derived class of the parameter
// type, a derived-to-base Conversion.
if (!Best->FinalConversion.DirectBinding)
return false;
ICS.setUserDefined();
ICS.UserDefined.Before = Best->Conversions[0].Standard;
ICS.UserDefined.After = Best->FinalConversion;
ICS.UserDefined.HadMultipleCandidates = HadMultipleCandidates;
ICS.UserDefined.ConversionFunction = Best->Function;
ICS.UserDefined.FoundConversionFunction = Best->FoundDecl;
ICS.UserDefined.EllipsisConversion = false;
assert(ICS.UserDefined.After.ReferenceBinding &&
ICS.UserDefined.After.DirectBinding &&
"Expected a direct reference binding!");
return true;
case OR_Ambiguous:
ICS.setAmbiguous();
for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
Cand != CandidateSet.end(); ++Cand)
if (Cand->Viable)
ICS.Ambiguous.addConversion(Cand->FoundDecl, Cand->Function);
return true;
case OR_No_Viable_Function:
case OR_Deleted:
// There was no suitable conversion, or we found a deleted
// conversion; continue with other checks.
return false;
}
llvm_unreachable("Invalid OverloadResult!");
}
/// Compute an implicit conversion sequence for reference
/// initialization.
static ImplicitConversionSequence
TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
SourceLocation DeclLoc,
bool SuppressUserConversions,
bool AllowExplicit) {
assert(DeclType->isReferenceType() && "Reference init needs a reference");
// Most paths end in a failed conversion.
ImplicitConversionSequence ICS;
ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
QualType T1 = DeclType->getAs<ReferenceType>()->getPointeeType();
QualType T2 = Init->getType();
// If the initializer is the address of an overloaded function, try
// to resolve the overloaded function. If all goes well, T2 is the
// type of the resulting function.
if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
DeclAccessPair Found;
if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(Init, DeclType,
false, Found))
T2 = Fn->getType();
}
// Compute some basic properties of the types and the initializer.
bool isRValRef = DeclType->isRValueReferenceType();
bool DerivedToBase = false;
bool ObjCConversion = false;
bool ObjCLifetimeConversion = false;
Expr::Classification InitCategory = Init->Classify(S.Context);
Sema::ReferenceCompareResult RefRelationship
= S.CompareReferenceRelationship(DeclLoc, T1, T2, DerivedToBase,
ObjCConversion, ObjCLifetimeConversion);
// C++0x [dcl.init.ref]p5:
// A reference to type "cv1 T1" is initialized by an expression
// of type "cv2 T2" as follows:
// -- If reference is an lvalue reference and the initializer expression
if (!isRValRef) {
// -- is an lvalue (but is not a bit-field), and "cv1 T1" is
// reference-compatible with "cv2 T2," or
//
// Per C++ [over.ics.ref]p4, we don't check the bit-field property here.
if (InitCategory.isLValue() && RefRelationship == Sema::Ref_Compatible) {
// C++ [over.ics.ref]p1:
// When a parameter of reference type binds directly (8.5.3)
// to an argument expression, the implicit conversion sequence
// is the identity conversion, unless the argument expression
// has a type that is a derived class of the parameter type,
// in which case the implicit conversion sequence is a
// derived-to-base Conversion (13.3.3.1).
ICS.setStandard();
ICS.Standard.First = ICK_Identity;
ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
: ObjCConversion? ICK_Compatible_Conversion
: ICK_Identity;
ICS.Standard.Third = ICK_Identity;
ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
ICS.Standard.setToType(0, T2);
ICS.Standard.setToType(1, T1);
ICS.Standard.setToType(2, T1);
ICS.Standard.ReferenceBinding = true;
ICS.Standard.DirectBinding = true;
ICS.Standard.IsLvalueReference = !isRValRef;
ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
ICS.Standard.BindsToRvalue = false;
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
ICS.Standard.CopyConstructor = nullptr;
ICS.Standard.DeprecatedStringLiteralToCharPtr = false;
// Nothing more to do: the inaccessibility/ambiguity check for
// derived-to-base conversions is suppressed when we're
// computing the implicit conversion sequence (C++
// [over.best.ics]p2).
return ICS;
}
// -- has a class type (i.e., T2 is a class type), where T1 is
// not reference-related to T2, and can be implicitly
// converted to an lvalue of type "cv3 T3," where "cv1 T1"
// is reference-compatible with "cv3 T3" 92) (this
// conversion is selected by enumerating the applicable
// conversion functions (13.3.1.6) and choosing the best
// one through overload resolution (13.3)),
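// For illustration (hypothetical type, not from this file):
//   struct S { operator int&(); };
//   S s;
//   int &r = s; // int is not reference-related to S; the binding goes
//               // through the conversion function operator int&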
if (!SuppressUserConversions && T2->isRecordType() &&
S.isCompleteType(DeclLoc, T2) &&
RefRelationship == Sema::Ref_Incompatible) {
if (FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
Init, T2, /*AllowRvalues=*/false,
AllowExplicit))
return ICS;
}
}
// -- Otherwise, the reference shall be an lvalue reference to a
// non-volatile const type (i.e., cv1 shall be const), or the reference
// shall be an rvalue reference.
if (!isRValRef && (!T1.isConstQualified() || T1.isVolatileQualified()))
return ICS;
// -- If the initializer expression
//
// -- is an xvalue, class prvalue, array prvalue or function
// lvalue and "cv1 T1" is reference-compatible with "cv2 T2", or
if (RefRelationship == Sema::Ref_Compatible &&
(InitCategory.isXValue() ||
(InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) ||
(InitCategory.isLValue() && T2->isFunctionType()))) {
ICS.setStandard();
ICS.Standard.First = ICK_Identity;
ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
: ObjCConversion? ICK_Compatible_Conversion
: ICK_Identity;
ICS.Standard.Third = ICK_Identity;
ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
ICS.Standard.setToType(0, T2);
ICS.Standard.setToType(1, T1);
ICS.Standard.setToType(2, T1);
ICS.Standard.ReferenceBinding = true;
// In C++0x, this is always a direct binding. In C++98/03, it's a direct
// binding unless we're binding to a class prvalue.
// Note: Although xvalues wouldn't normally show up in C++98/03 code, we
// allow the use of rvalue references in C++98/03 for the benefit of
// standard library implementors; therefore, we need the xvalue check here.
ICS.Standard.DirectBinding =
S.getLangOpts().CPlusPlus11 ||
!(InitCategory.isPRValue() || T2->isRecordType());
ICS.Standard.IsLvalueReference = !isRValRef;
ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
ICS.Standard.BindsToRvalue = InitCategory.isRValue();
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
ICS.Standard.CopyConstructor = nullptr;
ICS.Standard.DeprecatedStringLiteralToCharPtr = false;
return ICS;
}
// -- has a class type (i.e., T2 is a class type), where T1 is not
// reference-related to T2, and can be implicitly converted to
// an xvalue, class prvalue, or function lvalue of type
// "cv3 T3", where "cv1 T1" is reference-compatible with
// "cv3 T3",
//
// then the reference is bound to the value of the initializer
// expression in the first case and to the result of the conversion
// in the second case (or, in either case, to an appropriate base
// class subobject).
if (!SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
T2->isRecordType() && S.isCompleteType(DeclLoc, T2) &&
FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
Init, T2, /*AllowRvalues=*/true,
AllowExplicit)) {
// In the second case, if the reference is an rvalue reference
// and the second standard conversion sequence of the
// user-defined conversion sequence includes an lvalue-to-rvalue
// conversion, the program is ill-formed.
if (ICS.isUserDefined() && isRValRef &&
ICS.UserDefined.After.First == ICK_Lvalue_To_Rvalue)
ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
return ICS;
}
// A temporary of function type cannot be created; don't even try.
if (T1->isFunctionType())
return ICS;
// -- Otherwise, a temporary of type "cv1 T1" is created and
// initialized from the initializer expression using the
// rules for a non-reference copy initialization (8.5). The
// reference is then bound to the temporary. If T1 is
// reference-related to T2, cv1 must be the same
// cv-qualification as, or greater cv-qualification than,
// cv2; otherwise, the program is ill-formed.
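// For illustration (hypothetical snippet, not from this file):
//   const volatile int cvi = 0;
//   const int &r = cvi; // ill-formed: reference-related, but cv1
//                       // (const) does not include cv2 (const volatile)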
if (RefRelationship == Sema::Ref_Related) {
// If cv1 == cv2 or cv1 is a greater cv-qualified than cv2, then
// we would be reference-compatible or reference-compatible with
// added qualification. But that wasn't the case, so the reference
// initialization fails.
//
// Note that we only want to check address spaces and cvr-qualifiers here.
// ObjC GC, lifetime and unaligned qualifiers aren't important.
Qualifiers T1Quals = T1.getQualifiers();
Qualifiers T2Quals = T2.getQualifiers();
T1Quals.removeObjCGCAttr();
T1Quals.removeObjCLifetime();
T2Quals.removeObjCGCAttr();
T2Quals.removeObjCLifetime();
// MS compiler ignores __unaligned qualifier for references; do the same.
T1Quals.removeUnaligned();
T2Quals.removeUnaligned();
if (!T1Quals.compatiblyIncludes(T2Quals))
return ICS;
}
// If at least one of the types is a class type, the types are not
// related, and we aren't allowed any user conversions, the
// reference binding fails. This case is important for breaking
// recursion, since TryImplicitConversion below will attempt to
// create a temporary through the use of a copy constructor.
if (SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
(T1->isRecordType() || T2->isRecordType()))
return ICS;
// If T1 is reference-related to T2 and the reference is an rvalue
// reference, the initializer expression shall not be an lvalue.
if (RefRelationship >= Sema::Ref_Related &&
isRValRef && Init->Classify(S.Context).isLValue())
return ICS;
// C++ [over.ics.ref]p2:
// When a parameter of reference type is not bound directly to
// an argument expression, the conversion sequence is the one
// required to convert the argument expression to the
// underlying type of the reference according to
// 13.3.3.1. Conceptually, this conversion sequence corresponds
// to copy-initializing a temporary of the underlying type with
// the argument expression. Any difference in top-level
// cv-qualification is subsumed by the initialization itself
// and does not constitute a conversion.
ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions,
/*AllowExplicit=*/false,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
/*AllowObjCConversionOnExplicit=*/false);
// Of course, that's still a reference binding.
if (ICS.isStandard()) {
ICS.Standard.ReferenceBinding = true;
ICS.Standard.IsLvalueReference = !isRValRef;
ICS.Standard.BindsToFunctionLvalue = false;
ICS.Standard.BindsToRvalue = true;
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
ICS.Standard.ObjCLifetimeConversionBinding = false;
} else if (ICS.isUserDefined()) {
const ReferenceType *LValRefType =
ICS.UserDefined.ConversionFunction->getReturnType()
->getAs<LValueReferenceType>();
// C++ [over.ics.ref]p3:
// Except for an implicit object parameter, for which see 13.3.1, a
// standard conversion sequence cannot be formed if it requires [...]
// binding an rvalue reference to an lvalue other than a function
// lvalue.
// Note that the function case is not possible here.
if (DeclType->isRValueReferenceType() && LValRefType) {
// FIXME: This is the wrong BadConversionSequence. The problem is binding
// an rvalue reference to a (non-function) lvalue, not binding an lvalue
// reference to an rvalue!
ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, Init, DeclType);
return ICS;
}
ICS.UserDefined.After.ReferenceBinding = true;
ICS.UserDefined.After.IsLvalueReference = !isRValRef;
ICS.UserDefined.After.BindsToFunctionLvalue = false;
ICS.UserDefined.After.BindsToRvalue = !LValRefType;
ICS.UserDefined.After.BindsImplicitObjectArgumentWithoutRefQualifier = false;
ICS.UserDefined.After.ObjCLifetimeConversionBinding = false;
}
return ICS;
}
static ImplicitConversionSequence
TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
bool InOverloadResolution,
bool AllowObjCWritebackConversion,
bool AllowExplicit = false);
/// TryListConversion - Try to copy-initialize a value of type ToType from the
/// initializer list From.
static ImplicitConversionSequence
TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
bool SuppressUserConversions,
bool InOverloadResolution,
bool AllowObjCWritebackConversion) {
// C++11 [over.ics.list]p1:
// When an argument is an initializer list, it is not an expression and
// special rules apply for converting it to a parameter type.
ImplicitConversionSequence Result;
Result.setBad(BadConversionSequence::no_conversion, From, ToType);
// We need a complete type for what follows. Incomplete types can never be
// initialized from init lists.
if (!S.isCompleteType(From->getLocStart(), ToType))
return Result;
// Per DR1467:
// If the parameter type is a class X and the initializer list has a single
// element of type cv U, where U is X or a class derived from X, the
// implicit conversion sequence is the one required to convert the element
// to the parameter type.
//
// Otherwise, if the parameter type is a character array [...]
// and the initializer list has a single element that is an
// appropriately-typed string literal (8.5.2 [dcl.init.string]), the
// implicit conversion sequence is the identity conversion.
if (From->getNumInits() == 1) {
if (ToType->isRecordType()) {
QualType InitType = From->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, ToType) ||
S.IsDerivedFrom(From->getLocStart(), InitType, ToType))
return TryCopyInitialization(S, From->getInit(0), ToType,
SuppressUserConversions,
InOverloadResolution,
AllowObjCWritebackConversion);
}
// FIXME: Check the other conditions here: array of character type,
// initializer is a string literal.
if (ToType->isArrayType()) {
InitializedEntity Entity =
InitializedEntity::InitializeParameter(S.Context, ToType,
/*Consumed=*/false);
if (S.CanPerformCopyInitialization(Entity, From)) {
Result.setStandard();
Result.Standard.setAsIdentityConversion();
Result.Standard.setFromType(ToType);
Result.Standard.setAllToTypes(ToType);
return Result;
}
}
}
// C++14 [over.ics.list]p2: Otherwise, if the parameter type [...] (below).
// C++11 [over.ics.list]p2:
// If the parameter type is std::initializer_list<X> or "array of X" and
// all the elements can be implicitly converted to X, the implicit
// conversion sequence is the worst conversion necessary to convert an
// element of the list to X.
//
// C++14 [over.ics.list]p3:
// Otherwise, if the parameter type is "array of N X", if the initializer
// list has exactly N elements or if it has fewer than N elements and X is
// default-constructible, and if all the elements of the initializer list
// can be implicitly converted to X, the implicit conversion sequence is
// the worst conversion necessary to convert an element of the list to X.
//
// FIXME: We're missing a lot of these checks.
bool toStdInitializerList = false;
QualType X;
if (ToType->isArrayType())
X = S.Context.getAsArrayType(ToType)->getElementType();
else
toStdInitializerList = S.isStdInitializerList(ToType, &X);
if (!X.isNull()) {
for (unsigned i = 0, e = From->getNumInits(); i < e; ++i) {
Expr *Init = From->getInit(i);
ImplicitConversionSequence ICS =
TryCopyInitialization(S, Init, X, SuppressUserConversions,
InOverloadResolution,
AllowObjCWritebackConversion);
// If a single element isn't convertible, fail.
if (ICS.isBad()) {
Result = ICS;
break;
}
// Otherwise, look for the worst conversion.
if (Result.isBad() ||
CompareImplicitConversionSequences(S, From->getLocStart(), ICS,
Result) ==
ImplicitConversionSequence::Worse)
Result = ICS;
}
// For an empty list, we won't have computed any conversion sequence.
// Introduce the identity conversion sequence.
if (From->getNumInits() == 0) {
Result.setStandard();
Result.Standard.setAsIdentityConversion();
Result.Standard.setFromType(ToType);
Result.Standard.setAllToTypes(ToType);
}
Result.setStdInitializerListElement(toStdInitializerList);
return Result;
}
// C++14 [over.ics.list]p4:
// C++11 [over.ics.list]p3:
// Otherwise, if the parameter is a non-aggregate class X and overload
// resolution chooses a single best constructor [...] the implicit
// conversion sequence is a user-defined conversion sequence. If multiple
// constructors are viable but none is better than the others, the
// implicit conversion sequence is a user-defined conversion sequence.
if (ToType->isRecordType() && !ToType->isAggregateType()) {
// This function can deal with initializer lists.
return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
/*AllowExplicit=*/false,
InOverloadResolution, /*CStyle=*/false,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
}
// C++14 [over.ics.list]p5:
// C++11 [over.ics.list]p4:
// Otherwise, if the parameter has an aggregate type which can be
// initialized from the initializer list [...] the implicit conversion
// sequence is a user-defined conversion sequence.
if (ToType->isAggregateType()) {
// Type is an aggregate, argument is an init list. At this point it comes
// down to checking whether the initialization works.
// FIXME: Find out whether this parameter is consumed or not.
// FIXME: Expose SemaInit's aggregate initialization code so that we don't
// need to call into the initialization code here; overload resolution
// should not be doing that.
InitializedEntity Entity =
InitializedEntity::InitializeParameter(S.Context, ToType,
/*Consumed=*/false);
if (S.CanPerformCopyInitialization(Entity, From)) {
Result.setUserDefined();
Result.UserDefined.Before.setAsIdentityConversion();
// Initializer lists don't have a type.
Result.UserDefined.Before.setFromType(QualType());
Result.UserDefined.Before.setAllToTypes(QualType());
Result.UserDefined.After.setAsIdentityConversion();
Result.UserDefined.After.setFromType(ToType);
Result.UserDefined.After.setAllToTypes(ToType);
Result.UserDefined.ConversionFunction = nullptr;
}
return Result;
}
// C++14 [over.ics.list]p6:
// C++11 [over.ics.list]p5:
// Otherwise, if the parameter is a reference, see 13.3.3.1.4.
if (ToType->isReferenceType()) {
// The standard is notoriously unclear here, since 13.3.3.1.4 doesn't
// mention initializer lists in any way. So we go by what list-
// initialization would do and try to extrapolate from that.
QualType T1 = ToType->getAs<ReferenceType>()->getPointeeType();
// If the initializer list has a single element that is reference-related
// to the parameter type, we initialize the reference from that.
if (From->getNumInits() == 1) {
Expr *Init = From->getInit(0);
QualType T2 = Init->getType();
// If the initializer is the address of an overloaded function, try
// to resolve the overloaded function. If all goes well, T2 is the
// type of the resulting function.
if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
DeclAccessPair Found;
if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(
Init, ToType, false, Found))
T2 = Fn->getType();
}
// Compute some basic properties of the types and the initializer.
bool dummy1 = false;
bool dummy2 = false;
bool dummy3 = false;
Sema::ReferenceCompareResult RefRelationship
= S.CompareReferenceRelationship(From->getLocStart(), T1, T2, dummy1,
dummy2, dummy3);
if (RefRelationship >= Sema::Ref_Related) {
return TryReferenceInit(S, Init, ToType, /*FIXME*/From->getLocStart(),
SuppressUserConversions,
/*AllowExplicit=*/false);
}
}
// Otherwise, we bind the reference to a temporary created from the
// initializer list.
Result = TryListConversion(S, From, T1, SuppressUserConversions,
InOverloadResolution,
AllowObjCWritebackConversion);
if (Result.isFailure())
return Result;
assert(!Result.isEllipsis() &&
"Sub-initialization cannot result in ellipsis conversion.");
// Can we even bind to a temporary?
if (ToType->isRValueReferenceType() ||
(T1.isConstQualified() && !T1.isVolatileQualified())) {
StandardConversionSequence &SCS = Result.isStandard() ? Result.Standard :
Result.UserDefined.After;
SCS.ReferenceBinding = true;
SCS.IsLvalueReference = ToType->isLValueReferenceType();
SCS.BindsToRvalue = true;
SCS.BindsToFunctionLvalue = false;
SCS.BindsImplicitObjectArgumentWithoutRefQualifier = false;
SCS.ObjCLifetimeConversionBinding = false;
} else
Result.setBad(BadConversionSequence::lvalue_ref_to_rvalue,
From, ToType);
return Result;
}
// C++14 [over.ics.list]p7:
// C++11 [over.ics.list]p6:
// Otherwise, if the parameter type is not a class:
if (!ToType->isRecordType()) {
// - if the initializer list has one element that is not itself an
// initializer list, the implicit conversion sequence is the one
// required to convert the element to the parameter type.
unsigned NumInits = From->getNumInits();
if (NumInits == 1 && !isa<InitListExpr>(From->getInit(0)))
Result = TryCopyInitialization(S, From->getInit(0), ToType,
SuppressUserConversions,
InOverloadResolution,
AllowObjCWritebackConversion);
// - if the initializer list has no elements, the implicit conversion
// sequence is the identity conversion.
else if (NumInits == 0) {
Result.setStandard();
Result.Standard.setAsIdentityConversion();
Result.Standard.setFromType(ToType);
Result.Standard.setAllToTypes(ToType);
}
return Result;
}
// C++14 [over.ics.list]p8:
// C++11 [over.ics.list]p7:
// In all cases other than those enumerated above, no conversion is possible.
return Result;
}
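// For example (illustrative declarations; assumes <initializer_list> is
// included), each call below exercises a different [over.ics.list] case
// handled above:
//
//   struct S { S(int, double) {} };      // non-aggregate class type
//   void f(std::initializer_list<int>);  // worst element conversion
//   void g(S);                           // user-defined conversion sequence
//   void h(const int &);                 // reference bound to a temporary
//   void calls() { f({1, 2}); g({1, 2.0}); h({3}); }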
/// TryCopyInitialization - Try to copy-initialize a value of type
/// ToType from the expression From. Return the implicit conversion
/// sequence required to pass this argument, which may be a bad
/// conversion sequence (meaning that the argument cannot be passed to
/// a parameter of this type). If @p SuppressUserConversions, then we
/// do not permit any user-defined conversion sequences.
static ImplicitConversionSequence
TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
bool SuppressUserConversions,
bool InOverloadResolution,
bool AllowObjCWritebackConversion,
bool AllowExplicit) {
if (InitListExpr *FromInitList = dyn_cast<InitListExpr>(From))
return TryListConversion(S, FromInitList, ToType, SuppressUserConversions,
InOverloadResolution, AllowObjCWritebackConversion);
if (ToType->isReferenceType())
return TryReferenceInit(S, From, ToType,
/*FIXME:*/From->getLocStart(),
SuppressUserConversions,
AllowExplicit);
return TryImplicitConversion(S, From, ToType,
SuppressUserConversions,
/*AllowExplicit=*/false,
InOverloadResolution,
/*CStyle=*/false,
AllowObjCWritebackConversion,
/*AllowObjCConversionOnExplicit=*/false);
}
static bool TryCopyInitialization(const CanQualType FromQTy,
const CanQualType ToQTy,
Sema &S,
SourceLocation Loc,
ExprValueKind FromVK) {
OpaqueValueExpr TmpExpr(Loc, FromQTy, FromVK);
ImplicitConversionSequence ICS =
TryCopyInitialization(S, &TmpExpr, ToQTy, true, true, false);
return !ICS.isBad();
}
/// TryObjectArgumentInitialization - Try to initialize the object
/// parameter of the given member function (@c Method) from the
/// expression @p From.
static ImplicitConversionSequence
TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
Expr::Classification FromClassification,
CXXMethodDecl *Method,
CXXRecordDecl *ActingContext) {
QualType ClassType = S.Context.getTypeDeclType(ActingContext);
// [class.dtor]p2: A destructor can be invoked for a const, volatile or
// const volatile object.
unsigned Quals = isa<CXXDestructorDecl>(Method) ?
Qualifiers::Const | Qualifiers::Volatile : Method->getTypeQualifiers();
QualType ImplicitParamType = S.Context.getCVRQualifiedType(ClassType, Quals);
// Set up the conversion sequence as a "bad" conversion, to allow us
// to exit early.
ImplicitConversionSequence ICS;
// We need to have an object of class type.
if (const PointerType *PT = FromType->getAs<PointerType>()) {
FromType = PT->getPointeeType();
// When we had a pointer, it's implicitly dereferenced, so we
// had better have an lvalue.
assert(FromClassification.isLValue());
}
assert(FromType->isRecordType());
// C++0x [over.match.funcs]p4:
// For non-static member functions, the type of the implicit object
// parameter is
//
// - "lvalue reference to cv X" for functions declared without a
// ref-qualifier or with the & ref-qualifier
// - "rvalue reference to cv X" for functions declared with the &&
// ref-qualifier
//
// where X is the class of which the function is a member and cv is the
// cv-qualification on the member function declaration.
//
// However, when finding an implicit conversion sequence for the argument, we
// are not allowed to perform user-defined conversions
// (C++ [over.match.funcs]p5). We perform a simplified version of
// reference binding here, that allows class rvalues to bind to
// non-constant references.
// First check the qualifiers.
QualType FromTypeCanon = S.Context.getCanonicalType(FromType);
if (ImplicitParamType.getCVRQualifiers()
!= FromTypeCanon.getLocalCVRQualifiers() &&
!ImplicitParamType.isAtLeastAsQualifiedAs(FromTypeCanon)) {
ICS.setBad(BadConversionSequence::bad_qualifiers,
FromType, ImplicitParamType);
return ICS;
}
// Check that we have either the same type or a derived type. It
// affects the conversion rank.
QualType ClassTypeCanon = S.Context.getCanonicalType(ClassType);
ImplicitConversionKind SecondKind;
if (ClassTypeCanon == FromTypeCanon.getLocalUnqualifiedType()) {
SecondKind = ICK_Identity;
} else if (S.IsDerivedFrom(Loc, FromType, ClassType))
SecondKind = ICK_Derived_To_Base;
else {
ICS.setBad(BadConversionSequence::unrelated_class,
FromType, ImplicitParamType);
return ICS;
}
// Check the ref-qualifier.
switch (Method->getRefQualifier()) {
case RQ_None:
// Do nothing; we don't care about lvalueness or rvalueness.
break;
case RQ_LValue:
if (!FromClassification.isLValue() && Quals != Qualifiers::Const) {
// non-const lvalue reference cannot bind to an rvalue
ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, FromType,
ImplicitParamType);
return ICS;
}
break;
case RQ_RValue:
if (!FromClassification.isRValue()) {
// rvalue reference cannot bind to an lvalue
ICS.setBad(BadConversionSequence::rvalue_ref_to_lvalue, FromType,
ImplicitParamType);
return ICS;
}
break;
}
// Success. Mark this as a reference binding.
ICS.setStandard();
ICS.Standard.setAsIdentityConversion();
ICS.Standard.Second = SecondKind;
ICS.Standard.setFromType(FromType);
ICS.Standard.setAllToTypes(ImplicitParamType);
ICS.Standard.ReferenceBinding = true;
ICS.Standard.DirectBinding = true;
ICS.Standard.IsLvalueReference = Method->getRefQualifier() != RQ_RValue;
ICS.Standard.BindsToFunctionLvalue = false;
ICS.Standard.BindsToRvalue = FromClassification.isRValue();
ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier
= (Method->getRefQualifier() == RQ_None);
return ICS;
}
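// For example, the ref-qualifier checks above reject both calls below:
//
//   struct X {
//     void lv() &;  // implicit object argument must be an lvalue
//     void rv() &&; // implicit object argument must be an rvalue
//   };
//   void f(X x) {
//     X().lv(); // error: lvalue_ref_to_rvalue
//     x.rv();   // error: rvalue_ref_to_lvalue
//   }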
/// PerformObjectArgumentInitialization - Perform initialization of
/// the implicit object parameter for the given Method with the given
/// expression.
ExprResult
Sema::PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method) {
QualType FromRecordType, DestType;
QualType ImplicitParamRecordType =
Method->getThisType(Context)->getAs<PointerType>()->getPointeeType();
Expr::Classification FromClassification;
if (const PointerType *PT = From->getType()->getAs<PointerType>()) {
FromRecordType = PT->getPointeeType();
DestType = Method->getThisType(Context);
FromClassification = Expr::Classification::makeSimpleLValue();
} else {
FromRecordType = From->getType();
DestType = ImplicitParamRecordType;
FromClassification = From->Classify(Context);
// When performing member access on an rvalue, materialize a temporary.
if (From->isRValue()) {
From = CreateMaterializeTemporaryExpr(FromRecordType, From,
Method->getRefQualifier() !=
RefQualifierKind::RQ_RValue);
}
}
// Note that we always use the true parent context when performing
// the actual argument initialization.
ImplicitConversionSequence ICS = TryObjectArgumentInitialization(
*this, From->getLocStart(), From->getType(), FromClassification, Method,
Method->getParent());
if (ICS.isBad()) {
switch (ICS.Bad.Kind) {
case BadConversionSequence::bad_qualifiers: {
Qualifiers FromQs = FromRecordType.getQualifiers();
Qualifiers ToQs = DestType.getQualifiers();
unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
if (CVR) {
Diag(From->getLocStart(),
diag::err_member_function_call_bad_cvr)
<< Method->getDeclName() << FromRecordType << (CVR - 1)
<< From->getSourceRange();
Diag(Method->getLocation(), diag::note_previous_decl)
<< Method->getDeclName();
return ExprError();
}
break;
}
case BadConversionSequence::lvalue_ref_to_rvalue:
case BadConversionSequence::rvalue_ref_to_lvalue: {
bool IsRValueQualified =
Method->getRefQualifier() == RefQualifierKind::RQ_RValue;
Diag(From->getLocStart(), diag::err_member_function_call_bad_ref)
<< Method->getDeclName() << FromClassification.isRValue()
<< IsRValueQualified;
Diag(Method->getLocation(), diag::note_previous_decl)
<< Method->getDeclName();
return ExprError();
}
case BadConversionSequence::no_conversion:
case BadConversionSequence::unrelated_class:
break;
}
return Diag(From->getLocStart(),
diag::err_member_function_call_bad_type)
<< ImplicitParamRecordType << FromRecordType << From->getSourceRange();
}
if (ICS.Standard.Second == ICK_Derived_To_Base) {
ExprResult FromRes =
PerformObjectMemberConversion(From, Qualifier, FoundDecl, Method);
if (FromRes.isInvalid())
return ExprError();
From = FromRes.get();
}
if (!Context.hasSameType(From->getType(), DestType))
From = ImpCastExprToType(From, DestType, CK_NoOp,
From->getValueKind()).get();
return From;
}
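// For example, calling an inherited member function takes the
// ICK_Derived_To_Base path above, adjusting the implicit object argument:
//
//   struct B { void f(); };
//   struct D : B {};
//   void g(D d) { d.f(); } // 'this' converted from D* to B*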
/// TryContextuallyConvertToBool - Attempt to contextually convert the
/// expression From to bool (C++0x [conv]p3).
static ImplicitConversionSequence
TryContextuallyConvertToBool(Sema &S, Expr *From) {
return TryImplicitConversion(S, From, S.Context.BoolTy,
/*SuppressUserConversions=*/false,
/*AllowExplicit=*/true,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
/*AllowObjCConversionOnExplicit=*/false);
}
/// PerformContextuallyConvertToBool - Perform a contextual conversion
/// of the expression From to bool (C++0x [conv]p3).
ExprResult Sema::PerformContextuallyConvertToBool(Expr *From) {
if (checkPlaceholderForOverload(*this, From))
return ExprError();
ImplicitConversionSequence ICS = TryContextuallyConvertToBool(*this, From);
if (!ICS.isBad())
return PerformImplicitConversion(From, Context.BoolTy, ICS, AA_Converting);
if (!DiagnoseMultipleUserDefinedConversion(From, Context.BoolTy))
return Diag(From->getLocStart(),
diag::err_typecheck_bool_condition)
<< From->getType() << From->getSourceRange();
return ExprError();
}
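// For example, a contextual conversion to bool may use an explicit
// conversion function, unlike copy-initialization:
//
//   struct Flag { explicit operator bool() const { return true; } };
//   void f(Flag fl) { if (fl) {} } // OK; 'bool b = fl;' would be ill-formed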
/// Check that the specified conversion is permitted in a converted constant
/// expression, according to C++11 [expr.const]p3. Return true if the conversion
/// is acceptable.
static bool CheckConvertedConstantConversions(Sema &S,
StandardConversionSequence &SCS) {
// Since we know that the target type is an integral or unscoped enumeration
// type, most conversion kinds are impossible. All possible First and Third
// conversions are fine.
switch (SCS.Second) {
case ICK_Identity:
case ICK_Function_Conversion:
case ICK_Integral_Promotion:
case ICK_Integral_Conversion: // Narrowing conversions are checked elsewhere.
case ICK_Zero_Queue_Conversion:
return true;
case ICK_Boolean_Conversion:
// Conversion from an integral or unscoped enumeration type to bool is
// classified as ICK_Boolean_Conversion, but it's also arguably an integral
// conversion, so we allow it in a converted constant expression.
//
// FIXME: Per core issue 1407, we should not allow this, but that breaks
// a lot of popular code. We should at least add a warning for this
// (non-conforming) extension.
return SCS.getFromType()->isIntegralOrUnscopedEnumerationType() &&
SCS.getToType(2)->isBooleanType();
case ICK_Pointer_Conversion:
case ICK_Pointer_Member:
// C++1z: null pointer conversions and null member pointer conversions are
// only permitted if the source type is std::nullptr_t.
return SCS.getFromType()->isNullPtrType();
case ICK_Floating_Promotion:
case ICK_Complex_Promotion:
case ICK_Floating_Conversion:
case ICK_Complex_Conversion:
case ICK_Floating_Integral:
case ICK_Compatible_Conversion:
case ICK_Derived_To_Base:
case ICK_Vector_Conversion:
case ICK_Vector_Splat:
case ICK_Complex_Real:
case ICK_Block_Pointer_Conversion:
case ICK_TransparentUnionConversion:
case ICK_Writeback_Conversion:
case ICK_Zero_Event_Conversion:
case ICK_C_Only_Conversion:
case ICK_Incompatible_Pointer_Conversion:
return false;
case ICK_Lvalue_To_Rvalue:
case ICK_Array_To_Pointer:
case ICK_Function_To_Pointer:
llvm_unreachable("found a first conversion kind in Second");
case ICK_Qualification:
llvm_unreachable("found a third conversion kind in Second");
case ICK_Num_Conversion_Kinds:
break;
}
llvm_unreachable("unknown conversion kind");
}
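// For example, for a non-type template parameter of type 'int':
//
//   template <int N> struct C {};
//   C<'a'> ok;  // OK: integral promotion is permitted above
//   C<1.5> bad; // error: ICK_Floating_Integral is rejected above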
/// CheckConvertedConstantExpression - Check that the expression From is a
/// converted constant expression of type T, perform the conversion and produce
/// the converted expression, per C++11 [expr.const]p3.
static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
QualType T, APValue &Value,
Sema::CCEKind CCE,
bool RequireInt) {
assert(S.getLangOpts().CPlusPlus11 &&
"converted constant expression outside C++11");
if (checkPlaceholderForOverload(S, From))
return ExprError();
// C++1z [expr.const]p3:
// A converted constant expression of type T is an expression,
// implicitly converted to type T, where the converted
// expression is a constant expression and the implicit conversion
// sequence contains only [... list of conversions ...].
// C++1z [stmt.if]p2:
// If the if statement is of the form if constexpr, the value of the
// condition shall be a contextually converted constant expression of type
// bool.
ImplicitConversionSequence ICS =
CCE == Sema::CCEK_ConstexprIf
? TryContextuallyConvertToBool(S, From)
: TryCopyInitialization(S, From, T,
/*SuppressUserConversions=*/false,
/*InOverloadResolution=*/false,
/*AllowObjcWritebackConversion=*/false,
/*AllowExplicit=*/false);
StandardConversionSequence *SCS = nullptr;
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
SCS = &ICS.Standard;
break;
case ImplicitConversionSequence::UserDefinedConversion:
// We are converting to a non-class type, so the Before sequence
// must be trivial.
SCS = &ICS.UserDefined.After;
break;
case ImplicitConversionSequence::AmbiguousConversion:
case ImplicitConversionSequence::BadConversion:
if (!S.DiagnoseMultipleUserDefinedConversion(From, T))
return S.Diag(From->getLocStart(),
diag::err_typecheck_converted_constant_expression)
<< From->getType() << From->getSourceRange() << T;
return ExprError();
case ImplicitConversionSequence::EllipsisConversion:
llvm_unreachable("ellipsis conversion in converted constant expression");
}
// Check that we would only use permitted conversions.
if (!CheckConvertedConstantConversions(S, *SCS)) {
return S.Diag(From->getLocStart(),
diag::err_typecheck_converted_constant_expression_disallowed)
<< From->getType() << From->getSourceRange() << T;
}
// [...] and where the reference binding (if any) binds directly.
if (SCS->ReferenceBinding && !SCS->DirectBinding) {
return S.Diag(From->getLocStart(),
diag::err_typecheck_converted_constant_expression_indirect)
<< From->getType() << From->getSourceRange() << T;
}
ExprResult Result =
S.PerformImplicitConversion(From, T, ICS, Sema::AA_Converting);
if (Result.isInvalid())
return Result;
// Check for a narrowing implicit conversion.
APValue PreNarrowingValue;
QualType PreNarrowingType;
switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue,
PreNarrowingType)) {
case NK_Dependent_Narrowing:
// Implicit conversion to a narrower type, but the expression is
// value-dependent so we can't tell whether it's actually narrowing.
case NK_Variable_Narrowing:
// Implicit conversion to a narrower type, and the value is not a constant
// expression. We'll diagnose this in a moment.
case NK_Not_Narrowing:
break;
case NK_Constant_Narrowing:
S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
<< CCE << /*Constant*/1
<< PreNarrowingValue.getAsString(S.Context, PreNarrowingType) << T;
break;
case NK_Type_Narrowing:
S.Diag(From->getLocStart(), diag::ext_cce_narrowing)
<< CCE << /*Constant*/0 << From->getType() << T;
break;
}
if (Result.get()->isValueDependent()) {
Value = APValue();
return Result;
}
// Check the expression is a constant expression.
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
Eval.Diag = &Notes;
Expr::ConstExprUsage Usage = CCE == Sema::CCEK_TemplateArg
? Expr::EvaluateForMangling
: Expr::EvaluateForCodeGen;
if (!Result.get()->EvaluateAsConstantExpr(Eval, Usage, S.Context) ||
(RequireInt && !Eval.Val.isInt())) {
// The expression can't be folded, so we can't keep it at this position in
// the AST.
Result = ExprError();
} else {
Value = Eval.Val;
if (Notes.empty()) {
// It's a constant expression.
return Result;
}
}
// It's not a constant expression. Produce an appropriate diagnostic.
if (Notes.size() == 1 &&
Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr)
S.Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
else {
S.Diag(From->getLocStart(), diag::err_expr_not_cce)
<< CCE << From->getSourceRange();
for (unsigned I = 0; I < Notes.size(); ++I)
S.Diag(Notes[I].first, Notes[I].second);
}
return ExprError();
}
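// For example, the narrowing check above fires for:
//
//   template <char V> struct T {};
//   T<300> t; // error: 300 cannot be narrowed to 'char' (constant narrowing)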
ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE) {
return ::CheckConvertedConstantExpression(*this, From, T, Value, CCE, false);
}
ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value,
CCEKind CCE) {
assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
APValue V;
auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true);
if (!R.isInvalid() && !R.get()->isValueDependent())
Value = V.getInt();
return R;
}
/// dropPointerConversions - If the given standard conversion sequence
/// involves any pointer conversions, remove them. This may change
/// the result type of the conversion sequence.
static void dropPointerConversion(StandardConversionSequence &SCS) {
if (SCS.Second == ICK_Pointer_Conversion) {
SCS.Second = ICK_Identity;
SCS.Third = ICK_Identity;
SCS.ToTypePtrs[2] = SCS.ToTypePtrs[1] = SCS.ToTypePtrs[0];
}
}
/// TryContextuallyConvertToObjCPointer - Attempt to contextually
/// convert the expression From to an Objective-C pointer type.
static ImplicitConversionSequence
TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
// Do an implicit conversion to 'id'.
QualType Ty = S.Context.getObjCIdType();
ImplicitConversionSequence ICS
= TryImplicitConversion(S, From, Ty,
// FIXME: Are these flags correct?
/*SuppressUserConversions=*/false,
/*AllowExplicit=*/true,
/*InOverloadResolution=*/false,
/*CStyle=*/false,
/*AllowObjCWritebackConversion=*/false,
/*AllowObjCConversionOnExplicit=*/true);
// Strip off any final conversions to 'id'.
switch (ICS.getKind()) {
case ImplicitConversionSequence::BadConversion:
case ImplicitConversionSequence::AmbiguousConversion:
case ImplicitConversionSequence::EllipsisConversion:
break;
case ImplicitConversionSequence::UserDefinedConversion:
dropPointerConversion(ICS.UserDefined.After);
break;
case ImplicitConversionSequence::StandardConversion:
dropPointerConversion(ICS.Standard);
break;
}
return ICS;
}
/// PerformContextuallyConvertToObjCPointer - Perform a contextual
/// conversion of the expression From to an Objective-C pointer type.
/// Returns a valid but null ExprResult if no conversion sequence exists.
ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
if (checkPlaceholderForOverload(*this, From))
return ExprError();
QualType Ty = Context.getObjCIdType();
ImplicitConversionSequence ICS =
TryContextuallyConvertToObjCPointer(*this, From);
if (!ICS.isBad())
return PerformImplicitConversion(From, Ty, ICS, AA_Converting);
return ExprResult();
}
/// Determine whether the provided type is an integral type, or an enumeration
/// type of a permitted flavor.
bool Sema::ICEConvertDiagnoser::match(QualType T) {
return AllowScopedEnumerations ? T->isIntegralOrEnumerationType()
: T->isIntegralOrUnscopedEnumerationType();
}
static ExprResult
diagnoseAmbiguousConversion(Sema &SemaRef, SourceLocation Loc, Expr *From,
Sema::ContextualImplicitConverter &Converter,
QualType T, UnresolvedSetImpl &ViableConversions) {
if (Converter.Suppress)
return ExprError();
Converter.diagnoseAmbiguous(SemaRef, Loc, T) << From->getSourceRange();
for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
CXXConversionDecl *Conv =
cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl());
QualType ConvTy = Conv->getConversionType().getNonReferenceType();
Converter.noteAmbiguous(SemaRef, Conv, ConvTy);
}
return From;
}
static bool
diagnoseNoViableConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
Sema::ContextualImplicitConverter &Converter,
QualType T, bool HadMultipleCandidates,
UnresolvedSetImpl &ExplicitConversions) {
if (ExplicitConversions.size() == 1 && !Converter.Suppress) {
DeclAccessPair Found = ExplicitConversions[0];
CXXConversionDecl *Conversion =
cast<CXXConversionDecl>(Found->getUnderlyingDecl());
// The user probably meant to invoke the given explicit
// conversion; use it.
QualType ConvTy = Conversion->getConversionType().getNonReferenceType();
std::string TypeStr;
ConvTy.getAsStringInternal(TypeStr, SemaRef.getPrintingPolicy());
Converter.diagnoseExplicitConv(SemaRef, Loc, T, ConvTy)
<< FixItHint::CreateInsertion(From->getLocStart(),
"static_cast<" + TypeStr + ">(")
<< FixItHint::CreateInsertion(
SemaRef.getLocForEndOfToken(From->getLocEnd()), ")");
Converter.noteExplicitConv(SemaRef, Conversion, ConvTy);
// If we aren't in a SFINAE context, build a call to the
// explicit conversion function.
if (SemaRef.isSFINAEContext())
return true;
SemaRef.CheckMemberOperatorAccess(From->getExprLoc(), From, nullptr, Found);
ExprResult Result = SemaRef.BuildCXXMemberCallExpr(From, Found, Conversion,
HadMultipleCandidates);
if (Result.isInvalid())
return true;
// Record usage of conversion in an implicit cast.
From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(),
CK_UserDefinedConversion, Result.get(),
nullptr, Result.get()->getValueKind());
}
return false;
}
static bool recordConversion(Sema &SemaRef, SourceLocation Loc, Expr *&From,
Sema::ContextualImplicitConverter &Converter,
QualType T, bool HadMultipleCandidates,
DeclAccessPair &Found) {
CXXConversionDecl *Conversion =
cast<CXXConversionDecl>(Found->getUnderlyingDecl());
SemaRef.CheckMemberOperatorAccess(From->getExprLoc(), From, nullptr, Found);
QualType ToType = Conversion->getConversionType().getNonReferenceType();
if (!Converter.SuppressConversion) {
if (SemaRef.isSFINAEContext())
return true;
Converter.diagnoseConversion(SemaRef, Loc, T, ToType)
<< From->getSourceRange();
}
ExprResult Result = SemaRef.BuildCXXMemberCallExpr(From, Found, Conversion,
HadMultipleCandidates);
if (Result.isInvalid())
return true;
// Record usage of conversion in an implicit cast.
From = ImplicitCastExpr::Create(SemaRef.Context, Result.get()->getType(),
CK_UserDefinedConversion, Result.get(),
nullptr, Result.get()->getValueKind());
return false;
}
static ExprResult finishContextualImplicitConversion(
Sema &SemaRef, SourceLocation Loc, Expr *From,
Sema::ContextualImplicitConverter &Converter) {
if (!Converter.match(From->getType()) && !Converter.Suppress)
Converter.diagnoseNoMatch(SemaRef, Loc, From->getType())
<< From->getSourceRange();
return SemaRef.DefaultLvalueConversion(From);
}
static void
collectViableConversionCandidates(Sema &SemaRef, Expr *From, QualType ToType,
UnresolvedSetImpl &ViableConversions,
OverloadCandidateSet &CandidateSet) {
for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
DeclAccessPair FoundDecl = ViableConversions[I];
NamedDecl *D = FoundDecl.getDecl();
CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
CXXConversionDecl *Conv;
FunctionTemplateDecl *ConvTemplate;
if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D)))
Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
Conv = cast<CXXConversionDecl>(D);
if (ConvTemplate)
SemaRef.AddTemplateConversionCandidate(
ConvTemplate, FoundDecl, ActingContext, From, ToType, CandidateSet,
/*AllowObjCConversionOnExplicit=*/false);
else
SemaRef.AddConversionCandidate(Conv, FoundDecl, ActingContext, From,
ToType, CandidateSet,
/*AllowObjCConversionOnExplicit=*/false);
}
}
/// Attempt to convert the given expression to a type which is accepted
/// by the given converter.
///
/// This routine will attempt to convert an expression of class type to a
/// type accepted by the specified converter. In C++11 and before, the class
/// must have a single non-explicit conversion function converting to a matching
/// type. In C++1y, there can be multiple such conversion functions, but only
/// one target type.
///
/// \param Loc The source location of the construct that requires the
/// conversion.
///
/// \param From The expression we're converting from.
///
/// \param Converter Used to control and diagnose the conversion process.
///
/// \returns The expression, converted to an integral or enumeration type if
/// successful.
ExprResult Sema::PerformContextualImplicitConversion(
SourceLocation Loc, Expr *From, ContextualImplicitConverter &Converter) {
// We can't perform any more checking for type-dependent expressions.
if (From->isTypeDependent())
return From;
// Process placeholders immediately.
if (From->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(From);
if (result.isInvalid())
return result;
From = result.get();
}
// If the expression already has a matching type, we're golden.
QualType T = From->getType();
if (Converter.match(T))
return DefaultLvalueConversion(From);
// FIXME: Check for missing '()' if T is a function type?
// We can only perform contextual implicit conversions on objects of class
// type.
const RecordType *RecordTy = T->getAs<RecordType>();
if (!RecordTy || !getLangOpts().CPlusPlus) {
if (!Converter.Suppress)
Converter.diagnoseNoMatch(*this, Loc, T) << From->getSourceRange();
return From;
}
// We must have a complete class type.
struct TypeDiagnoserPartialDiag : TypeDiagnoser {
ContextualImplicitConverter &Converter;
Expr *From;
TypeDiagnoserPartialDiag(ContextualImplicitConverter &Converter, Expr *From)
: Converter(Converter), From(From) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
Converter.diagnoseIncomplete(S, Loc, T) << From->getSourceRange();
}
} IncompleteDiagnoser(Converter, From);
if (Converter.Suppress ? !isCompleteType(Loc, T)
: RequireCompleteType(Loc, T, IncompleteDiagnoser))
return From;
// Look for a conversion to an integral or enumeration type.
UnresolvedSet<4>
ViableConversions; // These are *potentially* viable in C++1y.
UnresolvedSet<4> ExplicitConversions;
const auto &Conversions =
cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
bool HadMultipleCandidates =
(std::distance(Conversions.begin(), Conversions.end()) > 1);
// To check that there is only one target type, in C++1y:
QualType ToType;
bool HasUniqueTargetType = true;
// Collect explicit or viable (potentially in C++1y) conversions.
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
CXXConversionDecl *Conversion;
FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
if (ConvTemplate) {
if (getLangOpts().CPlusPlus14)
Conversion = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
else
continue; // C++11 does not consider conversion operator templates(?).
} else
Conversion = cast<CXXConversionDecl>(D);
assert((!ConvTemplate || getLangOpts().CPlusPlus14) &&
"Conversion operator templates are considered potentially "
"viable in C++1y");
QualType CurToType = Conversion->getConversionType().getNonReferenceType();
if (Converter.match(CurToType) || ConvTemplate) {
if (Conversion->isExplicit()) {
// FIXME: For C++1y, do we need this restriction?
// cf. diagnoseNoViableConversion()
if (!ConvTemplate)
ExplicitConversions.addDecl(I.getDecl(), I.getAccess());
} else {
if (!ConvTemplate && getLangOpts().CPlusPlus14) {
if (ToType.isNull())
ToType = CurToType.getUnqualifiedType();
else if (HasUniqueTargetType &&
(CurToType.getUnqualifiedType() != ToType))
HasUniqueTargetType = false;
}
ViableConversions.addDecl(I.getDecl(), I.getAccess());
}
}
}
if (getLangOpts().CPlusPlus14) {
// C++1y [conv]p6:
// ... An expression e of class type E appearing in such a context
// is said to be contextually implicitly converted to a specified
// type T and is well-formed if and only if e can be implicitly
// converted to a type T that is determined as follows: E is searched
// for conversion functions whose return type is cv T or reference to
// cv T such that T is allowed by the context. There shall be
// exactly one such T.
// If no unique T is found:
if (ToType.isNull()) {
if (diagnoseNoViableConversion(*this, Loc, From, Converter, T,
HadMultipleCandidates,
ExplicitConversions))
return ExprError();
return finishContextualImplicitConversion(*this, Loc, From, Converter);
}
// If more than one unique T is found:
if (!HasUniqueTargetType)
return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T,
ViableConversions);
// If one unique T is found:
// First, build a candidate set from the previously recorded
// potentially viable conversions.
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
collectViableConversionCandidates(*this, From, ToType, ViableConversions,
CandidateSet);
// Then, perform overload resolution over the candidate set.
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, Loc, Best)) {
case OR_Success: {
// Apply this conversion.
DeclAccessPair Found =
DeclAccessPair::make(Best->Function, Best->FoundDecl.getAccess());
if (recordConversion(*this, Loc, From, Converter, T,
HadMultipleCandidates, Found))
return ExprError();
break;
}
case OR_Ambiguous:
return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T,
ViableConversions);
case OR_No_Viable_Function:
if (diagnoseNoViableConversion(*this, Loc, From, Converter, T,
HadMultipleCandidates,
ExplicitConversions))
return ExprError();
LLVM_FALLTHROUGH;
case OR_Deleted:
// We'll complain below about a non-integral condition type.
break;
}
} else {
switch (ViableConversions.size()) {
case 0: {
if (diagnoseNoViableConversion(*this, Loc, From, Converter, T,
HadMultipleCandidates,
ExplicitConversions))
return ExprError();
// We'll complain below about a non-integral condition type.
break;
}
case 1: {
// Apply this conversion.
DeclAccessPair Found = ViableConversions[0];
if (recordConversion(*this, Loc, From, Converter, T,
HadMultipleCandidates, Found))
return ExprError();
break;
}
default:
return diagnoseAmbiguousConversion(*this, Loc, From, Converter, T,
ViableConversions);
}
}
return finishContextualImplicitConversion(*this, Loc, From, Converter);
}
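// For example, in a switch condition, which accepts any integral or
// enumeration type:
//
//   struct OneWay  { operator int() const; };
//   struct TwoWays { operator int() const; operator long() const; };
//   void f(OneWay a, TwoWays b) {
//     switch (a) {} // OK: the unique target type T is 'int'
//     switch (b) {} // error: no unique T; the conversion is ambiguous
//   }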
/// IsAcceptableNonMemberOperatorCandidate - Determine whether Fn is
/// an acceptable non-member overloaded operator for a call whose
/// arguments have types T1 (and, if non-empty, T2). This routine
/// implements the check in C++ [over.match.oper]p3b2 concerning
/// enumeration types.
static bool IsAcceptableNonMemberOperatorCandidate(ASTContext &Context,
FunctionDecl *Fn,
ArrayRef<Expr *> Args) {
QualType T1 = Args[0]->getType();
QualType T2 = Args.size() > 1 ? Args[1]->getType() : QualType();
if (T1->isDependentType() || (!T2.isNull() && T2->isDependentType()))
return true;
if (T1->isRecordType() || (!T2.isNull() && T2->isRecordType()))
return true;
const FunctionProtoType *Proto = Fn->getType()->getAs<FunctionProtoType>();
if (Proto->getNumParams() < 1)
return false;
if (T1->isEnumeralType()) {
QualType ArgType = Proto->getParamType(0).getNonReferenceType();
if (Context.hasSameUnqualifiedType(T1, ArgType))
return true;
}
if (Proto->getNumParams() < 2)
return false;
if (!T2.isNull() && T2->isEnumeralType()) {
QualType ArgType = Proto->getParamType(1).getNonReferenceType();
if (Context.hasSameUnqualifiedType(T2, ArgType))
return true;
}
return false;
}
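// For example, for 'Red + 1' below, only the operator whose first
// parameter matches the enumeration type T1 remains a candidate:
//
//   enum Color { Red };
//   enum Mode { Fast };
//   int operator+(Color, int); // acceptable candidate
//   int operator+(Mode, int);  // rejected: neither parameter matches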
/// AddOverloadCandidate - Adds the given function to the set of
/// candidate functions, using the given function call arguments. If
/// @p SuppressUserConversions, then don't allow user-defined
/// conversions via constructors or conversion operators.
///
/// \param PartialOverloading true if we are performing "partial" overloading
/// based on an incomplete set of function arguments. This feature is used by
/// code completion.
void
Sema::AddOverloadCandidate(FunctionDecl *Function,
DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions,
bool PartialOverloading,
bool AllowExplicit,
ConversionSequenceList EarlyConversions) {
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
assert(Proto && "Functions without a prototype cannot be overloaded");
assert(!Function->getDescribedFunctionTemplate() &&
"Use AddTemplateOverloadCandidate for function templates");
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
if (!isa<CXXConstructorDecl>(Method)) {
// If we get here, it's because we're calling a member function
// that is named without a member access expression (e.g.,
// "this->f") that was either written explicitly or created
// implicitly. This can happen with a qualified call to a member
// function, e.g., X::f(). We use an empty type for the implied
// object argument (C++ [over.call.func]p3), and the acting context
// is irrelevant.
AddMethodCandidate(Method, FoundDecl, Method->getParent(), QualType(),
Expr::Classification::makeSimpleLValue(), Args,
CandidateSet, SuppressUserConversions,
PartialOverloading, EarlyConversions);
return;
}
// We treat a constructor like a non-member function, since its object
// argument doesn't participate in overload resolution.
}
if (!CandidateSet.isNewCandidate(Function))
return;
// C++ [over.match.oper]p3:
// if no operand has a class type, only those non-member functions in the
// lookup set that have a first parameter of type T1 or "reference to
// (possibly cv-qualified) T1", when T1 is an enumeration type, or (if there
// is a right operand) a second parameter of type T2 or "reference to
// (possibly cv-qualified) T2", when T2 is an enumeration type, are
// candidate functions.
if (CandidateSet.getKind() == OverloadCandidateSet::CSK_Operator &&
!IsAcceptableNonMemberOperatorCandidate(Context, Function, Args))
return;
// C++11 [class.copy]p11: [DR1402]
// A defaulted move constructor that is defined as deleted is ignored by
// overload resolution.
CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Function);
if (Constructor && Constructor->isDefaulted() && Constructor->isDeleted() &&
Constructor->isMoveConstructor())
return;
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
// Add this candidate
OverloadCandidate &Candidate =
CandidateSet.addCandidate(Args.size(), EarlyConversions);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = Function;
Candidate.Viable = true;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
if (Function->isMultiVersion() && Function->hasAttr<TargetAttr>() &&
!Function->getAttr<TargetAttr>()->isDefaultVersion()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
return;
}
if (Constructor) {
// C++ [class.copy]p3:
// A member function template is never instantiated to perform the copy
// of a class object to an object of its class type.
QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
if (Args.size() == 1 && Constructor->isSpecializationCopyingObject() &&
(Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
IsDerivedFrom(Args[0]->getLocStart(), Args[0]->getType(),
ClassType))) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_illegal_constructor;
return;
}
// C++ [over.match.funcs]p8: (proposed DR resolution)
// A constructor inherited from class type C that has a first parameter
// of type "reference to P" (including such a constructor instantiated
// from a template) is excluded from the set of candidate functions when
// constructing an object of type cv D if the argument list has exactly
// one argument and D is reference-related to P and P is reference-related
// to C.
auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl.getDecl());
if (Shadow && Args.size() == 1 && Constructor->getNumParams() >= 1 &&
Constructor->getParamDecl(0)->getType()->isReferenceType()) {
QualType P = Constructor->getParamDecl(0)->getType()->getPointeeType();
QualType C = Context.getRecordType(Constructor->getParent());
QualType D = Context.getRecordType(Shadow->getParent());
SourceLocation Loc = Args.front()->getExprLoc();
if ((Context.hasSameUnqualifiedType(P, C) || IsDerivedFrom(Loc, P, C)) &&
(Context.hasSameUnqualifiedType(D, P) || IsDerivedFrom(Loc, D, P))) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_inhctor_slice;
return;
}
}
}
unsigned NumParams = Proto->getNumParams();
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
if (TooManyArguments(NumParams, Args.size(), PartialOverloading) &&
!Proto->isVariadic()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
return;
}
// (C++ 13.3.2p2): A candidate function having more than m parameters
// is viable only if the (m+1)st parameter has a default argument
// (8.3.6). For the purposes of overload resolution, the
// parameter list is truncated on the right, so that there are
// exactly m parameters.
unsigned MinRequiredArgs = Function->getMinRequiredArguments();
if (Args.size() < MinRequiredArgs && !PartialOverloading) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
return;
}
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
// Skip the check for callers that are implicit members, because in this
// case we may not yet know what the member's target is; the target is
// inferred for the member automatically, based on the bases and fields of
// the class.
if (!Caller->isImplicit() && !IsAllowedCUDACall(Caller, Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
return;
}
// Determine the implicit conversion sequences for each of the
// arguments.
for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
if (Candidate.Conversions[ArgIdx].isInitialized()) {
// We already formed a conversion sequence for this parameter during
// template argument deduction.
} else if (ArgIdx < NumParams) {
// (C++ 13.3.2p3): for F to be a viable function, there shall
// exist for each argument an implicit conversion sequence
// (13.3.3.1) that converts that argument to the corresponding
// parameter of F.
QualType ParamType = Proto->getParamType(ArgIdx);
Candidate.Conversions[ArgIdx]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
SuppressUserConversions,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
getLangOpts().ObjCAutoRefCount,
AllowExplicit);
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
return;
}
} else {
// (C++ 13.3.2p2): For the purposes of overload resolution, any
// argument for which there is no corresponding parameter is
// considered to "match the ellipsis" (C++ 13.3.3.1.3).
Candidate.Conversions[ArgIdx].setEllipsis();
}
}
if (EnableIfAttr *FailedAttr = CheckEnableIf(Function, Args)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
if (LangOpts.OpenCL && isOpenCLDisabledDecl(Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_ext_disabled;
return;
}
}
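// For example, the arity checks above leave only the variadic overload
// viable for the call below:
//
//   void f(int);           // ovl_fail_too_many_arguments
//   void f(int, int, ...); // viable; extra arguments match the ellipsis
//   void g() { f(1, 2, 3, 4); }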
ObjCMethodDecl *
Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
SmallVectorImpl<ObjCMethodDecl *> &Methods) {
if (Methods.size() <= 1)
return nullptr;
for (unsigned b = 0, e = Methods.size(); b < e; b++) {
bool Match = true;
ObjCMethodDecl *Method = Methods[b];
unsigned NumNamedArgs = Sel.getNumArgs();
// The method might have more arguments than the selector indicates. This
// is due to the addition of C-style arguments in the method.
if (Method->param_size() > NumNamedArgs)
NumNamedArgs = Method->param_size();
if (Args.size() < NumNamedArgs)
continue;
for (unsigned i = 0; i < NumNamedArgs; i++) {
// We can't do any type-checking on a type-dependent argument.
if (Args[i]->isTypeDependent()) {
Match = false;
break;
}
ParmVarDecl *param = Method->parameters()[i];
Expr *argExpr = Args[i];
assert(argExpr && "SelectBestMethod(): missing expression");
// Strip the unbridged-cast placeholder expression off unless it's
// a consumed argument.
if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
!param->hasAttr<CFConsumedAttr>())
argExpr = stripARCUnbridgedCast(argExpr);
// If the parameter is __unknown_anytype, move on to the next method.
if (param->getType() == Context.UnknownAnyTy) {
Match = false;
break;
}
ImplicitConversionSequence ConversionState
= TryCopyInitialization(*this, argExpr, param->getType(),
/*SuppressUserConversions*/false,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
getLangOpts().ObjCAutoRefCount,
/*AllowExplicit*/false);
// This function looks for a reasonably-exact match, so we consider
// incompatible pointer conversions to be a failure here.
if (ConversionState.isBad() ||
(ConversionState.isStandard() &&
ConversionState.Standard.Second ==
ICK_Incompatible_Pointer_Conversion)) {
Match = false;
break;
}
}
// Promote additional arguments to variadic methods.
if (Match && Method->isVariadic()) {
for (unsigned i = NumNamedArgs, e = Args.size(); i < e; ++i) {
if (Args[i]->isTypeDependent()) {
Match = false;
break;
}
ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
nullptr);
if (Arg.isInvalid()) {
Match = false;
break;
}
}
} else {
// Check for extra arguments to non-variadic methods.
if (Args.size() != NumNamedArgs)
Match = false;
else if (Match && NumNamedArgs == 0 && Methods.size() > 1) {
// Special case when selectors have no argument. In this case, select
// one with the most general result type of 'id'.
for (unsigned b = 0, e = Methods.size(); b < e; b++) {
QualType ReturnT = Methods[b]->getReturnType();
if (ReturnT->isObjCIdType())
return Methods[b];
}
}
}
if (Match)
return Method;
}
return nullptr;
}
// specific_attr_iterator iterates over enable_if attributes in reverse, and
// enable_if is order-sensitive. As a result, we need to reverse things
// sometimes. Size of 4 elements is arbitrary.
static SmallVector<EnableIfAttr *, 4>
getOrderedEnableIfAttrs(const FunctionDecl *Function) {
SmallVector<EnableIfAttr *, 4> Result;
if (!Function->hasAttrs())
return Result;
const auto &FuncAttrs = Function->getAttrs();
for (Attr *Attr : FuncAttrs)
if (auto *EnableIf = dyn_cast<EnableIfAttr>(Attr))
Result.push_back(EnableIf);
std::reverse(Result.begin(), Result.end());
return Result;
}
static bool
convertArgsForAvailabilityChecks(Sema &S, FunctionDecl *Function, Expr *ThisArg,
ArrayRef<Expr *> Args, Sema::SFINAETrap &Trap,
bool MissingImplicitThis, Expr *&ConvertedThis,
SmallVectorImpl<Expr *> &ConvertedArgs) {
if (ThisArg) {
CXXMethodDecl *Method = cast<CXXMethodDecl>(Function);
assert(!isa<CXXConstructorDecl>(Method) &&
"Shouldn't have `this` for ctors!");
assert(!Method->isStatic() && "Shouldn't have `this` for static methods!");
ExprResult R = S.PerformObjectArgumentInitialization(
ThisArg, /*Qualifier=*/nullptr, Method, Method);
if (R.isInvalid())
return false;
ConvertedThis = R.get();
} else {
if (auto *MD = dyn_cast<CXXMethodDecl>(Function)) {
(void)MD;
assert((MissingImplicitThis || MD->isStatic() ||
isa<CXXConstructorDecl>(MD)) &&
"Expected `this` for non-ctor instance methods");
}
ConvertedThis = nullptr;
}
// Ignore any variadic arguments. Converting them is pointless, since the
// user can't refer to them in the function condition.
unsigned ArgSizeNoVarargs = std::min(Function->param_size(), Args.size());
// Convert the arguments.
for (unsigned I = 0; I != ArgSizeNoVarargs; ++I) {
ExprResult R;
R = S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
S.Context, Function->getParamDecl(I)),
SourceLocation(), Args[I]);
if (R.isInvalid())
return false;
ConvertedArgs.push_back(R.get());
}
if (Trap.hasErrorOccurred())
return false;
// Push default arguments if needed.
if (!Function->isVariadic() && Args.size() < Function->getNumParams()) {
for (unsigned i = Args.size(), e = Function->getNumParams(); i != e; ++i) {
ParmVarDecl *P = Function->getParamDecl(i);
Expr *DefArg = P->hasUninstantiatedDefaultArg()
? P->getUninstantiatedDefaultArg()
: P->getDefaultArg();
// This can only happen in code completion, i.e. when PartialOverloading
// is true.
if (!DefArg)
return false;
ExprResult R =
S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
S.Context, Function->getParamDecl(i)),
SourceLocation(), DefArg);
if (R.isInvalid())
return false;
ConvertedArgs.push_back(R.get());
}
if (Trap.hasErrorOccurred())
return false;
}
return true;
}
EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis) {
SmallVector<EnableIfAttr *, 4> EnableIfAttrs =
getOrderedEnableIfAttrs(Function);
if (EnableIfAttrs.empty())
return nullptr;
SFINAETrap Trap(*this);
SmallVector<Expr *, 16> ConvertedArgs;
// FIXME: We should look into making enable_if late-parsed.
Expr *DiscardedThis;
if (!convertArgsForAvailabilityChecks(
*this, Function, /*ThisArg=*/nullptr, Args, Trap,
/*MissingImplicitThis=*/true, DiscardedThis, ConvertedArgs))
return EnableIfAttrs[0];
for (auto *EIA : EnableIfAttrs) {
APValue Result;
// FIXME: This doesn't consider value-dependent cases, because doing so is
// very difficult. Ideally, we should handle them more gracefully.
if (!EIA->getCond()->EvaluateWithSubstitution(
Result, Context, Function, llvm::makeArrayRef(ConvertedArgs)))
return EIA;
if (!Result.isInt() || !Result.getInt().getBoolValue())
return EIA;
}
return nullptr;
}
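// For example (using Clang's enable_if attribute; names are illustrative),
// a condition that evaluates to false makes the candidate non-viable:
//
//   void route(int n)
//     __attribute__((enable_if(n >= 0, "n must be non-negative")));
//   void f() { route(-1); } // error: no viable function; enable_if failed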
template <typename CheckFn>
static bool diagnoseDiagnoseIfAttrsWith(Sema &S, const NamedDecl *ND,
bool ArgDependent, SourceLocation Loc,
CheckFn &&IsSuccessful) {
SmallVector<const DiagnoseIfAttr *, 8> Attrs;
for (const auto *DIA : ND->specific_attrs<DiagnoseIfAttr>()) {
if (ArgDependent == DIA->getArgDependent())
Attrs.push_back(DIA);
}
// Common case: No diagnose_if attributes, so we can quit early.
if (Attrs.empty())
return false;
auto WarningBegin = std::stable_partition(
Attrs.begin(), Attrs.end(),
[](const DiagnoseIfAttr *DIA) { return DIA->isError(); });
// Note that diagnose_if attributes are late-parsed, so they appear in the
// correct order (unlike enable_if attributes).
auto ErrAttr = llvm::find_if(llvm::make_range(Attrs.begin(), WarningBegin),
IsSuccessful);
if (ErrAttr != WarningBegin) {
const DiagnoseIfAttr *DIA = *ErrAttr;
S.Diag(Loc, diag::err_diagnose_if_succeeded) << DIA->getMessage();
S.Diag(DIA->getLocation(), diag::note_from_diagnose_if)
<< DIA->getParent() << DIA->getCond()->getSourceRange();
return true;
}
for (const auto *DIA : llvm::make_range(WarningBegin, Attrs.end()))
if (IsSuccessful(DIA)) {
S.Diag(Loc, diag::warn_diagnose_if_succeeded) << DIA->getMessage();
S.Diag(DIA->getLocation(), diag::note_from_diagnose_if)
<< DIA->getParent() << DIA->getCond()->getSourceRange();
}
return false;
}
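// For example (using Clang's diagnose_if attribute; names are
// illustrative), an "error" attribute whose condition holds is reported
// ahead of any warnings, matching the partition above:
//
//   int safe_div(int n, int d)
//     __attribute__((diagnose_if(d == 0, "division by zero", "error")));
//   int f() { return safe_div(1, 0); } // error emitted at the call site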
bool Sema::diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc) {
return diagnoseDiagnoseIfAttrsWith(
*this, Function, /*ArgDependent=*/true, Loc,
[&](const DiagnoseIfAttr *DIA) {
APValue Result;
// It's sane to use the same Args for any redecl of this function, since
// EvaluateWithSubstitution only cares about the position of each
// argument in the arg list, not the ParmVarDecl* it maps to.
if (!DIA->getCond()->EvaluateWithSubstitution(
Result, Context, cast<FunctionDecl>(DIA->getParent()), Args, ThisArg))
return false;
return Result.isInt() && Result.getInt().getBoolValue();
});
}
bool Sema::diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc) {
return diagnoseDiagnoseIfAttrsWith(
*this, ND, /*ArgDependent=*/false, Loc,
[&](const DiagnoseIfAttr *DIA) {
bool Result;
return DIA->getCond()->EvaluateAsBooleanCondition(Result, Context) &&
Result;
});
}
/// Add all of the function declarations in the given function set to
/// the overload candidate set.
void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs,
bool SuppressUserConversions,
bool PartialOverloading,
bool FirstArgumentIsBase) {
for (UnresolvedSetIterator F = Fns.begin(), E = Fns.end(); F != E; ++F) {
NamedDecl *D = F.getDecl()->getUnderlyingDecl();
ArrayRef<Expr *> FunctionArgs = Args;
FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
FunctionDecl *FD =
FunTmpl ? FunTmpl->getTemplatedDecl() : cast<FunctionDecl>(D);
if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic()) {
QualType ObjectType;
Expr::Classification ObjectClassification;
if (Args.size() > 0) {
if (Expr *E = Args[0]) {
// Use the explicit base to restrict the lookup:
ObjectType = E->getType();
ObjectClassification = E->Classify(Context);
} // .. else there is an implicit base.
FunctionArgs = Args.slice(1);
}
if (FunTmpl) {
AddMethodTemplateCandidate(
FunTmpl, F.getPair(),
cast<CXXRecordDecl>(FunTmpl->getDeclContext()),
ExplicitTemplateArgs, ObjectType, ObjectClassification,
FunctionArgs, CandidateSet, SuppressUserConversions,
PartialOverloading);
} else {
AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(),
cast<CXXMethodDecl>(FD)->getParent(), ObjectType,
ObjectClassification, FunctionArgs, CandidateSet,
SuppressUserConversions, PartialOverloading);
}
} else {
// This branch handles both standalone functions and static methods.
// Slice off the first argument (which is the base) when we access a
// static method as non-static.
if (Args.size() > 0 &&
(!Args[0] || (FirstArgumentIsBase && isa<CXXMethodDecl>(FD) &&
!isa<CXXConstructorDecl>(FD)))) {
assert(cast<CXXMethodDecl>(FD)->isStatic());
FunctionArgs = Args.slice(1);
}
if (FunTmpl) {
AddTemplateOverloadCandidate(
FunTmpl, F.getPair(), ExplicitTemplateArgs, FunctionArgs,
CandidateSet, SuppressUserConversions, PartialOverloading);
} else {
AddOverloadCandidate(FD, F.getPair(), FunctionArgs, CandidateSet,
SuppressUserConversions, PartialOverloading);
}
}
}
}
/// AddMethodCandidate - Adds a named decl (which is some kind of
/// method) as a method candidate to the given overload set.
void Sema::AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions) {
NamedDecl *Decl = FoundDecl.getDecl();
CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(Decl->getDeclContext());
if (isa<UsingShadowDecl>(Decl))
Decl = cast<UsingShadowDecl>(Decl)->getTargetDecl();
if (FunctionTemplateDecl *TD = dyn_cast<FunctionTemplateDecl>(Decl)) {
assert(isa<CXXMethodDecl>(TD->getTemplatedDecl()) &&
"Expected a member function template");
AddMethodTemplateCandidate(TD, FoundDecl, ActingContext,
/*ExplicitArgs*/ nullptr, ObjectType,
ObjectClassification, Args, CandidateSet,
SuppressUserConversions);
} else {
AddMethodCandidate(cast<CXXMethodDecl>(Decl), FoundDecl, ActingContext,
ObjectType, ObjectClassification, Args, CandidateSet,
SuppressUserConversions);
}
}
/// AddMethodCandidate - Adds the given C++ member function to the set
/// of candidate functions, using the given function call arguments
/// and the object argument (@c Object). For example, in a call
/// @c o.f(a1,a2), @c Object will contain @c o and @c Args will contain
/// both @c a1 and @c a2. If @p SuppressUserConversions, then don't
/// allow user-defined conversions via constructors or conversion
/// operators.
void
Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions,
bool PartialOverloading,
ConversionSequenceList EarlyConversions) {
const FunctionProtoType *Proto
= dyn_cast<FunctionProtoType>(Method->getType()->getAs<FunctionType>());
assert(Proto && "Methods without a prototype cannot be overloaded");
assert(!isa<CXXConstructorDecl>(Method) &&
"Use AddOverloadCandidate for constructors");
if (!CandidateSet.isNewCandidate(Method))
return;
// C++11 [class.copy]p23: [DR1402]
// A defaulted move assignment operator that is defined as deleted is
// ignored by overload resolution.
if (Method->isDefaulted() && Method->isDeleted() &&
Method->isMoveAssignmentOperator())
return;
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
// Add this candidate
OverloadCandidate &Candidate =
CandidateSet.addCandidate(Args.size() + 1, EarlyConversions);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = Method;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
unsigned NumParams = Proto->getNumParams();
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
if (TooManyArguments(NumParams, Args.size(), PartialOverloading) &&
!Proto->isVariadic()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
return;
}
// (C++ 13.3.2p2): A candidate function having more than m parameters
// is viable only if the (m+1)st parameter has a default argument
// (8.3.6). For the purposes of overload resolution, the
// parameter list is truncated on the right, so that there are
// exactly m parameters.
unsigned MinRequiredArgs = Method->getMinRequiredArguments();
if (Args.size() < MinRequiredArgs && !PartialOverloading) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
return;
}
Candidate.Viable = true;
if (Method->isStatic() || ObjectType.isNull())
// The implicit object argument is ignored.
Candidate.IgnoreObjectArgument = true;
else {
// Determine the implicit conversion sequence for the object
// parameter.
Candidate.Conversions[0] = TryObjectArgumentInitialization(
*this, CandidateSet.getLocation(), ObjectType, ObjectClassification,
Method, ActingContext);
if (Candidate.Conversions[0].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
return;
}
}
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
if (!IsAllowedCUDACall(Caller, Method)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
return;
}
// Determine the implicit conversion sequences for each of the
// arguments.
for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
if (Candidate.Conversions[ArgIdx + 1].isInitialized()) {
// We already formed a conversion sequence for this parameter during
// template argument deduction.
} else if (ArgIdx < NumParams) {
// (C++ 13.3.2p3): for F to be a viable function, there shall
// exist for each argument an implicit conversion sequence
// (13.3.3.1) that converts that argument to the corresponding
// parameter of F.
QualType ParamType = Proto->getParamType(ArgIdx);
Candidate.Conversions[ArgIdx + 1]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
SuppressUserConversions,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
getLangOpts().ObjCAutoRefCount);
if (Candidate.Conversions[ArgIdx + 1].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
return;
}
} else {
// (C++ 13.3.2p2): For the purposes of overload resolution, any
// argument for which there is no corresponding parameter is
// considered to "match the ellipsis" (C++ 13.3.3.1.3).
Candidate.Conversions[ArgIdx + 1].setEllipsis();
}
}
if (EnableIfAttr *FailedAttr = CheckEnableIf(Method, Args, true)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
if (Method->isMultiVersion() && Method->hasAttr<TargetAttr>() &&
!Method->getAttr<TargetAttr>()->isDefaultVersion()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
}
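// For illustration (hypothetical user code, not part of this file), the
// arity checks above reject candidates as follows:
//
//   struct S { void f(int, int = 0); };
//   S s;
//   s.f();        // fewer than the one required argument:
//                 //   ovl_fail_too_few_arguments
//   s.f(1, 2, 3); // more arguments than parameters and no ellipsis:
//                 //   ovl_fail_too_many_arguments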
/// Add a C++ member function template as a candidate to the candidate
/// set, using template argument deduction to produce an appropriate member
/// function template specialization.
void
Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions,
bool PartialOverloading) {
if (!CandidateSet.isNewCandidate(MethodTmpl))
return;
// C++ [over.match.funcs]p7:
// In each case where a candidate is a function template, candidate
// function template specializations are generated using template argument
// deduction (14.8.3, 14.8.2). Those candidates are then handled as
// candidate functions in the usual way.113) A given name can refer to one
// or more function templates and also to a set of overloaded non-template
// functions. In such a case, the candidate functions generated from each
// function template are combined with the set of non-template candidate
// functions.
TemplateDeductionInfo Info(CandidateSet.getLocation());
FunctionDecl *Specialization = nullptr;
ConversionSequenceList Conversions;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
MethodTmpl, ExplicitTemplateArgs, Args, Specialization, Info,
PartialOverloading, [&](ArrayRef<QualType> ParamTypes) {
return CheckNonDependentConversions(
MethodTmpl, ParamTypes, Args, CandidateSet, Conversions,
SuppressUserConversions, ActingContext, ObjectType,
ObjectClassification);
})) {
OverloadCandidate &Candidate =
CandidateSet.addCandidate(Conversions.size(), Conversions);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = MethodTmpl->getTemplatedDecl();
Candidate.Viable = false;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument =
cast<CXXMethodDecl>(Candidate.Function)->isStatic() ||
ObjectType.isNull();
Candidate.ExplicitCallArguments = Args.size();
if (Result == TDK_NonDependentConversionFailure)
Candidate.FailureKind = ovl_fail_bad_conversion;
else {
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
}
return;
}
// Add the function template specialization produced by template argument
// deduction as a candidate.
assert(Specialization && "Missing member function template specialization?");
assert(isa<CXXMethodDecl>(Specialization) &&
"Specialization is not a member function?");
AddMethodCandidate(cast<CXXMethodDecl>(Specialization), FoundDecl,
ActingContext, ObjectType, ObjectClassification, Args,
CandidateSet, SuppressUserConversions, PartialOverloading,
Conversions);
}
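// For illustration (hypothetical user code, not part of this file), a failed
// member template deduction still records a non-viable candidate above:
//
//   struct S { template <typename T> void f(T, T); };
//   S().f(1, 2.0); // T is deduced inconsistently (int vs. double), so the
//                  // candidate is kept with ovl_fail_bad_deduction for use
//                  // in diagnostics.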
/// Add a C++ function template specialization as a candidate
/// in the candidate set, using template argument deduction to produce
/// an appropriate function template specialization.
void
Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions,
bool PartialOverloading) {
if (!CandidateSet.isNewCandidate(FunctionTemplate))
return;
// C++ [over.match.funcs]p7:
// In each case where a candidate is a function template, candidate
// function template specializations are generated using template argument
// deduction (14.8.3, 14.8.2). Those candidates are then handled as
// candidate functions in the usual way.113) A given name can refer to one
// or more function templates and also to a set of overloaded non-template
// functions. In such a case, the candidate functions generated from each
// function template are combined with the set of non-template candidate
// functions.
TemplateDeductionInfo Info(CandidateSet.getLocation());
FunctionDecl *Specialization = nullptr;
ConversionSequenceList Conversions;
if (TemplateDeductionResult Result = DeduceTemplateArguments(
FunctionTemplate, ExplicitTemplateArgs, Args, Specialization, Info,
PartialOverloading, [&](ArrayRef<QualType> ParamTypes) {
return CheckNonDependentConversions(FunctionTemplate, ParamTypes,
Args, CandidateSet, Conversions,
SuppressUserConversions);
})) {
OverloadCandidate &Candidate =
CandidateSet.addCandidate(Conversions.size(), Conversions);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = FunctionTemplate->getTemplatedDecl();
Candidate.Viable = false;
Candidate.IsSurrogate = false;
// Ignore the object argument if there is one, since we don't have an object
// type.
Candidate.IgnoreObjectArgument =
isa<CXXMethodDecl>(Candidate.Function) &&
!isa<CXXConstructorDecl>(Candidate.Function);
Candidate.ExplicitCallArguments = Args.size();
if (Result == TDK_NonDependentConversionFailure)
Candidate.FailureKind = ovl_fail_bad_conversion;
else {
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
}
return;
}
// Add the function template specialization produced by template argument
// deduction as a candidate.
assert(Specialization && "Missing function template specialization?");
AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet,
SuppressUserConversions, PartialOverloading,
/*AllowExplicit*/false, Conversions);
}
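// For illustration (hypothetical user code, not part of this file), the
// successful path above generates and adds a specialization:
//
//   template <typename T> T twice(T x);
//   twice(3); // deduction succeeds with T = int, and the specialization
//             // twice<int> is added via AddOverloadCandidate.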
/// Check that implicit conversion sequences can be formed for each argument
/// whose corresponding parameter has a non-dependent type, per DR1391's
/// [temp.deduct.call]p10.
bool Sema::CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification) {
// FIXME: The cases in which we allow explicit conversions for constructor
// arguments never consider calling a constructor template. It's not clear
// that is correct.
const bool AllowExplicit = false;
auto *FD = FunctionTemplate->getTemplatedDecl();
auto *Method = dyn_cast<CXXMethodDecl>(FD);
bool HasThisConversion = Method && !isa<CXXConstructorDecl>(Method);
unsigned ThisConversions = HasThisConversion ? 1 : 0;
Conversions =
CandidateSet.allocateConversionSequences(ThisConversions + Args.size());
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
// For a method call, check the 'this' conversion here too. DR1391 doesn't
// require that, but this check should never result in a hard error, and
// overload resolution is permitted to sidestep instantiations.
if (HasThisConversion && !cast<CXXMethodDecl>(FD)->isStatic() &&
!ObjectType.isNull()) {
Conversions[0] = TryObjectArgumentInitialization(
*this, CandidateSet.getLocation(), ObjectType, ObjectClassification,
Method, ActingContext);
if (Conversions[0].isBad())
return true;
}
for (unsigned I = 0, N = std::min(ParamTypes.size(), Args.size()); I != N;
++I) {
QualType ParamType = ParamTypes[I];
if (!ParamType->isDependentType()) {
Conversions[ThisConversions + I]
= TryCopyInitialization(*this, Args[I], ParamType,
SuppressUserConversions,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
getLangOpts().ObjCAutoRefCount,
AllowExplicit);
if (Conversions[ThisConversions + I].isBad())
return true;
}
}
return false;
}
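// For illustration (hypothetical user code, not part of this file), the
// DR1391 check above can prune a template candidate without instantiation:
//
//   struct NoConv {};
//   template <typename T> void g(int, T);
//   g(NoConv(), 42); // the first parameter type is non-dependent and NoConv
//                    // does not convert to int, so deduction is abandoned
//                    // with TDK_NonDependentConversionFailure.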
/// Determine whether this is an allowable conversion from the result
/// of an explicit conversion operator to the expected type, per C++
/// [over.match.conv]p1 and [over.match.ref]p1.
///
/// \param ConvType The return type of the conversion function.
///
/// \param ToType The type we are converting to.
///
/// \param AllowObjCPointerConversion Allow a conversion from one
/// Objective-C pointer to another.
///
/// \returns true if the conversion is allowable, false otherwise.
static bool isAllowableExplicitConversion(Sema &S,
QualType ConvType, QualType ToType,
bool AllowObjCPointerConversion) {
QualType ToNonRefType = ToType.getNonReferenceType();
// Easy case: the types are the same.
if (S.Context.hasSameUnqualifiedType(ConvType, ToNonRefType))
return true;
// Allow qualification conversions.
bool ObjCLifetimeConversion;
if (S.IsQualificationConversion(ConvType, ToNonRefType, /*CStyle*/false,
ObjCLifetimeConversion))
return true;
// If we're not allowed to consider Objective-C pointer conversions,
// we're done.
if (!AllowObjCPointerConversion)
return false;
// Is this an Objective-C pointer conversion?
bool IncompatibleObjC = false;
QualType ConvertedType;
return S.isObjCPointerConversion(ConvType, ToNonRefType, ConvertedType,
IncompatibleObjC);
}
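// For illustration (hypothetical user code, not part of this file), the
// qualification-conversion case above admits candidates such as:
//
//   struct W { explicit operator int *(); };
//   W w;
//   const int *p(w); // int* converts to const int* by a qualification
//                    // conversion, so the explicit operator is a candidate.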
/// AddConversionCandidate - Add a C++ conversion function as a
/// candidate in the candidate set (C++ [over.match.conv],
/// C++ [over.match.copy]). From is the expression we're converting from,
/// and ToType is the type that we're eventually trying to convert to
/// (which may or may not be the same type as the type that the
/// conversion function produces).
void
Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
Expr *From, QualType ToType,
OverloadCandidateSet& CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion) {
assert(!Conversion->getDescribedFunctionTemplate() &&
"Conversion function templates use AddTemplateConversionCandidate");
QualType ConvType = Conversion->getConversionType().getNonReferenceType();
if (!CandidateSet.isNewCandidate(Conversion))
return;
// If the conversion function has an undeduced return type, trigger its
// deduction now.
if (getLangOpts().CPlusPlus14 && ConvType->isUndeducedType()) {
if (DeduceReturnType(Conversion, From->getExprLoc()))
return;
ConvType = Conversion->getConversionType().getNonReferenceType();
}
// If we don't allow any conversion of the result type, ignore conversion
// functions that don't convert to exactly (possibly cv-qualified) T.
if (!AllowResultConversion &&
!Context.hasSameUnqualifiedType(Conversion->getConversionType(), ToType))
return;
// Per C++ [over.match.conv]p1, [over.match.ref]p1, an explicit conversion
// operator is only a candidate if its return type is the target type or
// can be converted to the target type with a qualification conversion.
if (Conversion->isExplicit() &&
!isAllowableExplicitConversion(*this, ConvType, ToType,
AllowObjCConversionOnExplicit))
return;
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
// Add this candidate
OverloadCandidate &Candidate = CandidateSet.addCandidate(1);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = Conversion;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
Candidate.FinalConversion.setAsIdentityConversion();
Candidate.FinalConversion.setFromType(ConvType);
Candidate.FinalConversion.setAllToTypes(ToType);
Candidate.Viable = true;
Candidate.ExplicitCallArguments = 1;
// C++ [over.match.funcs]p4:
// For conversion functions, the function is considered to be a member of
// the class of the implied object argument for the purpose of
// defining the type of the implicit object parameter.
//
// Determine the implicit conversion sequence for the implicit
// object parameter.
QualType ImplicitParamType = From->getType();
if (const PointerType *FromPtrType = ImplicitParamType->getAs<PointerType>())
ImplicitParamType = FromPtrType->getPointeeType();
CXXRecordDecl *ConversionContext
= cast<CXXRecordDecl>(ImplicitParamType->getAs<RecordType>()->getDecl());
Candidate.Conversions[0] = TryObjectArgumentInitialization(
*this, CandidateSet.getLocation(), From->getType(),
From->Classify(Context), Conversion, ConversionContext);
if (Candidate.Conversions[0].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
return;
}
// We won't go through a user-defined conversion function to convert from a
// derived class to a base class, since such conversions are given Conversion
// Rank; they go through a copy constructor instead. 13.3.3.1.2-p4
// [over.ics.user]
QualType FromCanon
= Context.getCanonicalType(From->getType().getUnqualifiedType());
QualType ToCanon = Context.getCanonicalType(ToType).getUnqualifiedType();
if (FromCanon == ToCanon ||
IsDerivedFrom(CandidateSet.getLocation(), FromCanon, ToCanon)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_trivial_conversion;
return;
}
// To determine what the conversion from the result of calling the
// conversion function to the type we're eventually trying to
// convert to (ToType), we need to synthesize a call to the
// conversion function and attempt copy initialization from it. This
// makes sure that we get the right semantics with respect to
// lvalues/rvalues and the type. Fortunately, we can allocate this
// call on the stack and we don't need its arguments to be
// well-formed.
DeclRefExpr ConversionRef(Conversion, false, Conversion->getType(),
VK_LValue, From->getLocStart());
ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
Context.getPointerType(Conversion->getType()),
CK_FunctionToPointerDecay,
&ConversionRef, VK_RValue);
QualType ConversionType = Conversion->getConversionType();
if (!isCompleteType(From->getLocStart(), ConversionType)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_final_conversion;
return;
}
ExprValueKind VK = Expr::getValueKindForType(ConversionType);
// Note that it is safe to allocate CallExpr on the stack here because
// there are 0 arguments (i.e., nothing is allocated using ASTContext's
// allocator).
QualType CallResultType = ConversionType.getNonLValueExprType(Context);
CallExpr Call(Context, &ConversionFn, None, CallResultType, VK,
From->getLocStart());
ImplicitConversionSequence ICS =
TryCopyInitialization(*this, &Call, ToType,
/*SuppressUserConversions=*/true,
/*InOverloadResolution=*/false,
/*AllowObjCWritebackConversion=*/false);
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
Candidate.FinalConversion = ICS.Standard;
// C++ [over.ics.user]p3:
// If the user-defined conversion is specified by a specialization of a
// conversion function template, the second standard conversion sequence
// shall have exact match rank.
if (Conversion->getPrimaryTemplate() &&
GetConversionRank(ICS.Standard.Second) != ICR_Exact_Match) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_final_conversion_not_exact;
return;
}
// C++0x [dcl.init.ref]p5:
// In the second case, if the reference is an rvalue reference and
// the second standard conversion sequence of the user-defined
// conversion sequence includes an lvalue-to-rvalue conversion, the
// program is ill-formed.
if (ToType->isRValueReferenceType() &&
ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_final_conversion;
return;
}
break;
case ImplicitConversionSequence::BadConversion:
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_final_conversion;
return;
default:
llvm_unreachable(
"Can only end up with a standard conversion sequence or failure");
}
if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
if (Conversion->isMultiVersion() && Conversion->hasAttr<TargetAttr>() &&
!Conversion->getAttr<TargetAttr>()->isDefaultVersion()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_non_default_multiversion_function;
}
}
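// For illustration (hypothetical user code, not part of this file), the
// derived-to-base suppression above applies to code like:
//
//   struct Base {};
//   struct Derived : Base { operator Base(); };
//   Base b = Derived(); // the conversion function is marked non-viable
//                       // (ovl_fail_trivial_conversion); the derived-to-base
//                       // conversion goes through a copy constructor.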
/// Adds a conversion function template specialization
/// candidate to the overload set, using template argument deduction
/// to deduce the template arguments of the conversion function
/// template from the type that we are converting to (C++
/// [temp.deduct.conv]).
void
Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingDC,
Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet,
bool AllowObjCConversionOnExplicit,
bool AllowResultConversion) {
assert(isa<CXXConversionDecl>(FunctionTemplate->getTemplatedDecl()) &&
"Only conversion function templates permitted here");
if (!CandidateSet.isNewCandidate(FunctionTemplate))
return;
TemplateDeductionInfo Info(CandidateSet.getLocation());
CXXConversionDecl *Specialization = nullptr;
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, ToType,
Specialization, Info)) {
OverloadCandidate &Candidate = CandidateSet.addCandidate();
Candidate.FoundDecl = FoundDecl;
Candidate.Function = FunctionTemplate->getTemplatedDecl();
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_deduction;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = 1;
Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
Info);
return;
}
// Add the conversion function template specialization produced by
// template argument deduction as a candidate.
assert(Specialization && "Missing function template specialization?");
AddConversionCandidate(Specialization, FoundDecl, ActingDC, From, ToType,
CandidateSet, AllowObjCConversionOnExplicit,
AllowResultConversion);
}
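// For illustration (hypothetical user code, not part of this file), the
// deduction above works backwards from the target type:
//
//   struct Any { template <typename T> operator T(); };
//   double d = Any(); // T is deduced as double ([temp.deduct.conv]) and the
//                     // specialization operator double() is added.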
/// AddSurrogateCandidate - Adds a "surrogate" candidate function that
/// converts the given @c Object to a function pointer via the
/// conversion function @c Conversion, and then attempts to call it
/// with the given arguments (C++ [over.call.object]p2-4). Proto is
/// the type of function that we'll eventually be calling.
void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet) {
if (!CandidateSet.isNewCandidate(Conversion))
return;
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1);
Candidate.FoundDecl = FoundDecl;
Candidate.Function = nullptr;
Candidate.Surrogate = Conversion;
Candidate.Viable = true;
Candidate.IsSurrogate = true;
Candidate.IgnoreObjectArgument = false;
Candidate.ExplicitCallArguments = Args.size();
// Determine the implicit conversion sequence for the implicit
// object parameter.
ImplicitConversionSequence ObjectInit = TryObjectArgumentInitialization(
*this, CandidateSet.getLocation(), Object->getType(),
Object->Classify(Context), Conversion, ActingContext);
if (ObjectInit.isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
Candidate.Conversions[0] = ObjectInit;
return;
}
// The first conversion is actually a user-defined conversion whose
// first conversion is ObjectInit's standard conversion (which is
// effectively a reference binding). Record it as such.
Candidate.Conversions[0].setUserDefined();
Candidate.Conversions[0].UserDefined.Before = ObjectInit.Standard;
Candidate.Conversions[0].UserDefined.EllipsisConversion = false;
Candidate.Conversions[0].UserDefined.HadMultipleCandidates = false;
Candidate.Conversions[0].UserDefined.ConversionFunction = Conversion;
Candidate.Conversions[0].UserDefined.FoundConversionFunction = FoundDecl;
Candidate.Conversions[0].UserDefined.After
= Candidate.Conversions[0].UserDefined.Before;
Candidate.Conversions[0].UserDefined.After.setAsIdentityConversion();
// Find the number of parameters in the function type for the arity checks
// below.
unsigned NumParams = Proto->getNumParams();
// (C++ 13.3.2p2): A candidate function having fewer than m
// parameters is viable only if it has an ellipsis in its parameter
// list (8.3.5).
if (Args.size() > NumParams && !Proto->isVariadic()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_many_arguments;
return;
}
// Function types don't have any default arguments, so just check if
// we have enough arguments.
if (Args.size() < NumParams) {
// Not enough arguments.
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_too_few_arguments;
return;
}
// Determine the implicit conversion sequences for each of the
// arguments.
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
if (ArgIdx < NumParams) {
// (C++ 13.3.2p3): for F to be a viable function, there shall
// exist for each argument an implicit conversion sequence
// (13.3.3.1) that converts that argument to the corresponding
// parameter of F.
QualType ParamType = Proto->getParamType(ArgIdx);
Candidate.Conversions[ArgIdx + 1]
= TryCopyInitialization(*this, Args[ArgIdx], ParamType,
/*SuppressUserConversions=*/false,
/*InOverloadResolution=*/false,
/*AllowObjCWritebackConversion=*/
getLangOpts().ObjCAutoRefCount);
if (Candidate.Conversions[ArgIdx + 1].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
return;
}
} else {
// (C++ 13.3.2p2): For the purposes of overload resolution, any
// argument for which there is no corresponding parameter is
// considered to ""match the ellipsis" (C+ 13.3.3.1.3).
Candidate.Conversions[ArgIdx + 1].setEllipsis();
}
}
if (EnableIfAttr *FailedAttr = CheckEnableIf(Conversion, None)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_enable_if;
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
}
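// For illustration (hypothetical user code, not part of this file), a
// surrogate call function arises from code such as:
//
//   typedef int (*Fn)(int);
//   struct Callable { operator Fn(); };
//   Callable c;
//   c(42); // c is converted to Fn by the conversion function, and the
//          // resulting function pointer is called with 42.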
/// Add overload candidates for overloaded operators that are
/// member functions.
///
/// Add the overloaded operator candidates that are member functions
/// for the operator Op that was used in an operator expression such
/// as "x Op y". , Args/NumArgs provides the operator arguments, and
/// CandidateSet will store the added overload candidates. (C++
/// [over.match.oper]).
void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange) {
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
// C++ [over.match.oper]p3:
// For a unary operator @ with an operand of a type whose
// cv-unqualified version is T1, and for a binary operator @ with
// a left operand of a type whose cv-unqualified version is T1 and
// a right operand of a type whose cv-unqualified version is T2,
// three sets of candidate functions, designated member
// candidates, non-member candidates and built-in candidates, are
// constructed as follows:
QualType T1 = Args[0]->getType();
// -- If T1 is a complete class type or a class currently being
// defined, the set of member candidates is the result of the
// qualified lookup of T1::operator@ (13.3.1.1.1); otherwise,
// the set of member candidates is empty.
if (const RecordType *T1Rec = T1->getAs<RecordType>()) {
// Complete the type if it can be completed.
if (!isCompleteType(OpLoc, T1) && !T1Rec->isBeingDefined())
return;
// If the type is neither complete nor being defined, bail out now.
if (!T1Rec->getDecl()->getDefinition())
return;
LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName);
LookupQualifiedName(Operators, T1Rec->getDecl());
Operators.suppressDiagnostics();
for (LookupResult::iterator Oper = Operators.begin(),
OperEnd = Operators.end();
Oper != OperEnd;
++Oper)
AddMethodCandidate(Oper.getPair(), Args[0]->getType(),
Args[0]->Classify(Context), Args.slice(1),
CandidateSet, /*SuppressUserConversions=*/false);
}
}
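// For illustration (hypothetical user code, not part of this file), the
// qualified lookup above finds member operator candidates:
//
//   struct V { V operator+(const V &); };
//   V a, b;
//   a + b; // lookup of V::operator+ on the type of the left operand adds
//          // the member candidate; 'a' becomes the implicit object argument.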
/// AddBuiltinCandidate - Add a candidate for a built-in
/// operator. ResultTy and ParamTys are the result and parameter types
/// of the built-in candidate, respectively. Args and NumArgs are the
/// arguments being passed to the candidate. IsAssignmentOperator
/// should be true when this built-in candidate is an assignment
/// operator. NumContextualBoolArguments is the number of arguments
/// (at the beginning of the argument list) that will be contextually
/// converted to bool.
void Sema::AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator,
unsigned NumContextualBoolArguments) {
// Overload resolution is always an unevaluated context.
EnterExpressionEvaluationContext Unevaluated(
*this, Sema::ExpressionEvaluationContext::Unevaluated);
// Add this candidate
OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
Candidate.FoundDecl = DeclAccessPair::make(nullptr, AS_none);
Candidate.Function = nullptr;
Candidate.IsSurrogate = false;
Candidate.IgnoreObjectArgument = false;
std::copy(ParamTys, ParamTys + Args.size(), Candidate.BuiltinParamTypes);
// Determine the implicit conversion sequences for each of the
// arguments.
Candidate.Viable = true;
Candidate.ExplicitCallArguments = Args.size();
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
// C++ [over.match.oper]p4:
// For the built-in assignment operators, conversions of the
// left operand are restricted as follows:
// -- no temporaries are introduced to hold the left operand, and
// -- no user-defined conversions are applied to the left
// operand to achieve a type match with the left-most
// parameter of a built-in candidate.
//
// We block these conversions by turning off user-defined
// conversions, since that is the only way that initialization of
// a reference to a non-class type can occur from something that
// is not of the same type.
if (ArgIdx < NumContextualBoolArguments) {
assert(ParamTys[ArgIdx] == Context.BoolTy &&
"Contextual conversion to bool requires bool type");
Candidate.Conversions[ArgIdx]
= TryContextuallyConvertToBool(*this, Args[ArgIdx]);
} else {
Candidate.Conversions[ArgIdx]
= TryCopyInitialization(*this, Args[ArgIdx], ParamTys[ArgIdx],
ArgIdx == 0 && IsAssignmentOperator,
/*InOverloadResolution=*/false,
/*AllowObjCWritebackConversion=*/
getLangOpts().ObjCAutoRefCount);
}
if (Candidate.Conversions[ArgIdx].isBad()) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_conversion;
break;
}
}
}
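// For illustration (hypothetical user code, not part of this file), a
// built-in candidate added through this routine looks like:
//
//   enum E { e1, e2 };
//   bool b = e1 < e2; // addGenericBinaryPointerOrEnumeralOverloads (below)
//                     // calls AddBuiltinCandidate with ParamTys = {E, E}
//                     // for the built-in bool operator<(E, E).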
namespace {
/// BuiltinCandidateTypeSet - A set of types that will be used for the
/// candidate operator functions for built-in operators (C++
/// [over.built]). The types are separated into pointer types and
/// enumeration types.
class BuiltinCandidateTypeSet {
/// TypeSet - A set of types.
typedef llvm::SetVector<QualType, SmallVector<QualType, 8>,
llvm::SmallPtrSet<QualType, 8>> TypeSet;
/// PointerTypes - The set of pointer types that will be used in the
/// built-in candidates.
TypeSet PointerTypes;
/// MemberPointerTypes - The set of member pointer types that will be
/// used in the built-in candidates.
TypeSet MemberPointerTypes;
/// EnumerationTypes - The set of enumeration types that will be
/// used in the built-in candidates.
TypeSet EnumerationTypes;
/// The set of vector types that will be used in the built-in
/// candidates.
TypeSet VectorTypes;
/// A flag indicating whether non-record types are viable candidates.
bool HasNonRecordTypes;
/// A flag indicating whether either arithmetic or enumeration types
/// were present in the candidate set.
bool HasArithmeticOrEnumeralTypes;
/// A flag indicating whether the nullptr type was present in the
/// candidate set.
bool HasNullPtrType;
/// Sema - The semantic analysis instance where we are building the
/// candidate type set.
Sema &SemaRef;
/// Context - The AST context in which we will build the type sets.
ASTContext &Context;
bool AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
const Qualifiers &VisibleQuals);
bool AddMemberPointerWithMoreQualifiedTypeVariants(QualType Ty);
public:
/// iterator - Iterates through the types that are part of the set.
typedef TypeSet::iterator iterator;
BuiltinCandidateTypeSet(Sema &SemaRef)
: HasNonRecordTypes(false),
HasArithmeticOrEnumeralTypes(false),
HasNullPtrType(false),
SemaRef(SemaRef),
Context(SemaRef.Context) { }
void AddTypesConvertedFrom(QualType Ty,
SourceLocation Loc,
bool AllowUserConversions,
bool AllowExplicitConversions,
const Qualifiers &VisibleTypeConversionsQuals);
/// pointer_begin - First pointer type found.
iterator pointer_begin() { return PointerTypes.begin(); }
/// pointer_end - Past the last pointer type found.
iterator pointer_end() { return PointerTypes.end(); }
/// member_pointer_begin - First member pointer type found.
iterator member_pointer_begin() { return MemberPointerTypes.begin(); }
/// member_pointer_end - Past the last member pointer type found.
iterator member_pointer_end() { return MemberPointerTypes.end(); }
/// enumeration_begin - First enumeration type found.
iterator enumeration_begin() { return EnumerationTypes.begin(); }
/// enumeration_end - Past the last enumeration type found.
iterator enumeration_end() { return EnumerationTypes.end(); }
iterator vector_begin() { return VectorTypes.begin(); }
iterator vector_end() { return VectorTypes.end(); }
bool hasNonRecordTypes() { return HasNonRecordTypes; }
bool hasArithmeticOrEnumeralTypes() { return HasArithmeticOrEnumeralTypes; }
bool hasNullPtrType() const { return HasNullPtrType; }
};
} // end anonymous namespace
/// AddPointerWithMoreQualifiedTypeVariants - Add the pointer type @p Ty to
/// the set of pointer types along with any more-qualified variants of
/// that type. For example, if @p Ty is "int const *", this routine
/// will add "int const *", "int const volatile *", "int const
/// restrict *", and "int const volatile restrict *" to the set of
/// pointer types. Returns true if the add of @p Ty itself succeeded,
/// false otherwise.
///
/// FIXME: what to do about extended qualifiers?
bool
BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
const Qualifiers &VisibleQuals) {
// Insert this type.
if (!PointerTypes.insert(Ty))
return false;
QualType PointeeTy;
const PointerType *PointerTy = Ty->getAs<PointerType>();
bool buildObjCPtr = false;
if (!PointerTy) {
const ObjCObjectPointerType *PTy = Ty->castAs<ObjCObjectPointerType>();
PointeeTy = PTy->getPointeeType();
buildObjCPtr = true;
} else {
PointeeTy = PointerTy->getPointeeType();
}
// Don't add qualified variants of arrays. For one, they're not allowed
// (the qualifier would sink to the element type), and for another, the
// only overload situation where it matters is subscript or pointer +- int,
// and those shouldn't have qualifier variants anyway.
if (PointeeTy->isArrayType())
return true;
unsigned BaseCVR = PointeeTy.getCVRQualifiers();
bool hasVolatile = VisibleQuals.hasVolatile();
bool hasRestrict = VisibleQuals.hasRestrict();
// Iterate through all strict supersets of BaseCVR.
for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
if ((CVR | BaseCVR) != CVR) continue;
// Skip over volatile if no volatile found anywhere in the types.
if ((CVR & Qualifiers::Volatile) && !hasVolatile) continue;
// Skip over restrict if no restrict found anywhere in the types, or if
// the type cannot be restrict-qualified.
if ((CVR & Qualifiers::Restrict) &&
(!hasRestrict ||
(!(PointeeTy->isAnyPointerType() || PointeeTy->isReferenceType()))))
continue;
// Build qualified pointee type.
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
// Build qualified pointer type.
QualType QPointerTy;
if (!buildObjCPtr)
QPointerTy = Context.getPointerType(QPointeeTy);
else
QPointerTy = Context.getObjCObjectPointerType(QPointeeTy);
// Insert qualified pointer type.
PointerTypes.insert(QPointerTy);
}
return true;
}
/// AddMemberPointerWithMoreQualifiedTypeVariants - Add the member pointer
/// type @p Ty to the set of member pointer types along with any
/// more-qualified variants of that type, formed by adding each strict
/// superset of the pointee type's cv-qualifiers. Returns true if the add
/// of @p Ty itself succeeded, false otherwise.
///
/// FIXME: what to do about extended qualifiers?
bool
BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
QualType Ty) {
// Insert this type.
if (!MemberPointerTypes.insert(Ty))
return false;
const MemberPointerType *PointerTy = Ty->getAs<MemberPointerType>();
assert(PointerTy && "type was not a member pointer type!");
QualType PointeeTy = PointerTy->getPointeeType();
// Don't add qualified variants of arrays. For one, they're not allowed
// (the qualifier would sink to the element type), and for another, the
// only overload situation where it matters is subscript or pointer +- int,
// and those shouldn't have qualifier variants anyway.
if (PointeeTy->isArrayType())
return true;
const Type *ClassTy = PointerTy->getClass();
// Iterate through all strict supersets of the pointee type's CVR
// qualifiers.
unsigned BaseCVR = PointeeTy.getCVRQualifiers();
for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
if ((CVR | BaseCVR) != CVR) continue;
QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
MemberPointerTypes.insert(
Context.getMemberPointerType(QPointeeTy, ClassTy));
}
return true;
}
/// AddTypesConvertedFrom - Add each of the types to which the type @p
/// Ty can be implicitly converted to the given set of @p Types. We're
/// primarily interested in pointer types and enumeration types. We also
/// take member pointer types, for the conditional operator.
/// AllowUserConversions is true if we should look at the conversion
/// functions of a class type, and AllowExplicitConversions if we
/// should also include the explicit conversion functions of a class
/// type.
void
BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
SourceLocation Loc,
bool AllowUserConversions,
bool AllowExplicitConversions,
const Qualifiers &VisibleQuals) {
// Only deal with canonical types.
Ty = Context.getCanonicalType(Ty);
// Look through reference types; they aren't part of the type of an
// expression for the purposes of conversions.
if (const ReferenceType *RefTy = Ty->getAs<ReferenceType>())
Ty = RefTy->getPointeeType();
// If we're dealing with an array type, decay to the pointer.
if (Ty->isArrayType())
Ty = SemaRef.Context.getArrayDecayedType(Ty);
// Otherwise, we don't care about qualifiers on the type.
Ty = Ty.getLocalUnqualifiedType();
// Flag if we ever add a non-record type.
const RecordType *TyRec = Ty->getAs<RecordType>();
HasNonRecordTypes = HasNonRecordTypes || !TyRec;
// Flag if we encounter an arithmetic type.
HasArithmeticOrEnumeralTypes =
HasArithmeticOrEnumeralTypes || Ty->isArithmeticType();
if (Ty->isObjCIdType() || Ty->isObjCClassType())
PointerTypes.insert(Ty);
else if (Ty->getAs<PointerType>() || Ty->getAs<ObjCObjectPointerType>()) {
// Insert our type, and its more-qualified variants, into the set
// of types.
if (!AddPointerWithMoreQualifiedTypeVariants(Ty, VisibleQuals))
return;
} else if (Ty->isMemberPointerType()) {
// Member pointers are far easier, since the pointee can't be converted.
if (!AddMemberPointerWithMoreQualifiedTypeVariants(Ty))
return;
} else if (Ty->isEnumeralType()) {
HasArithmeticOrEnumeralTypes = true;
EnumerationTypes.insert(Ty);
} else if (Ty->isVectorType()) {
// We treat vector types as arithmetic types in many contexts as an
// extension.
HasArithmeticOrEnumeralTypes = true;
VectorTypes.insert(Ty);
} else if (Ty->isNullPtrType()) {
HasNullPtrType = true;
} else if (AllowUserConversions && TyRec) {
// No conversion functions in incomplete types.
if (!SemaRef.isCompleteType(Loc, Ty))
return;
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) {
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
// Skip conversion function templates; they don't tell us anything
// about which builtin types we can convert to.
if (isa<FunctionTemplateDecl>(D))
continue;
CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
if (AllowExplicitConversions || !Conv->isExplicit()) {
AddTypesConvertedFrom(Conv->getConversionType(), Loc, false, false,
VisibleQuals);
}
}
}
}
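// For illustration (hypothetical user code, not part of this file), given an
// operand of class type the routine above walks its conversion functions:
//
//   struct P { operator int *(); operator long(); };
//   // For an operand of type P, AddTypesConvertedFrom inserts int* (plus
//   // more-qualified variants) into PointerTypes, and the conversion to
//   // long sets HasArithmeticOrEnumeralTypes.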
/// Helper function for AddBuiltinOperatorCandidates() that adds
/// the volatile- and non-volatile-qualified assignment operators for the
/// given type to the candidate set.
static void AddBuiltinAssignmentOperatorCandidates(Sema &S,
QualType T,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet) {
QualType ParamTypes[2];
// T& operator=(T&, T)
ParamTypes[0] = S.Context.getLValueReferenceType(T);
ParamTypes[1] = T;
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
if (!S.Context.getCanonicalType(T).isVolatileQualified()) {
// volatile T& operator=(volatile T&, T)
ParamTypes[0]
= S.Context.getLValueReferenceType(S.Context.getVolatileType(T));
ParamTypes[1] = T;
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
}
}
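// For illustration (not part of this file): for T = int, the two candidates
// added above are
//
//   int &operator=(int &, int);
//   volatile int &operator=(volatile int &, int);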
/// CollectVRQualifiers - This routine returns Volatile/Restrict qualifiers,
/// if any, found in visible type conversion functions found in ArgExpr's type.
static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
Qualifiers VRQuals;
const RecordType *TyRec;
if (const MemberPointerType *RHSMPType =
ArgExpr->getType()->getAs<MemberPointerType>())
TyRec = RHSMPType->getClass()->getAs<RecordType>();
else
TyRec = ArgExpr->getType()->getAs<RecordType>();
if (!TyRec) {
// Just to be safe, assume the worst case.
VRQuals.addVolatile();
VRQuals.addRestrict();
return VRQuals;
}
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
if (!ClassDecl->hasDefinition())
return VRQuals;
for (NamedDecl *D : ClassDecl->getVisibleConversionFunctions()) {
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
if (CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(D)) {
QualType CanTy = Context.getCanonicalType(Conv->getConversionType());
if (const ReferenceType *ResTypeRef = CanTy->getAs<ReferenceType>())
CanTy = ResTypeRef->getPointeeType();
// Need to go down the pointer/mempointer chain and add qualifiers
// as we see them.
bool done = false;
while (!done) {
if (CanTy.isRestrictQualified())
VRQuals.addRestrict();
if (const PointerType *ResTypePtr = CanTy->getAs<PointerType>())
CanTy = ResTypePtr->getPointeeType();
else if (const MemberPointerType *ResTypeMPtr =
CanTy->getAs<MemberPointerType>())
CanTy = ResTypeMPtr->getPointeeType();
else
done = true;
if (CanTy.isVolatileQualified())
VRQuals.addVolatile();
if (VRQuals.hasRestrict() && VRQuals.hasVolatile())
return VRQuals;
}
}
}
return VRQuals;
}
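// For illustration (hypothetical user code, not part of this file), the
// qualifier collection above picks up qualifiers behind pointers:
//
//   struct Q { operator volatile int *(); };
//   // For an argument of type Q, CollectVRQualifiers reports 'volatile'
//   // (found on the pointee), so volatile-qualified built-in candidates
//   // will be considered.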
namespace {
/// Helper class to manage the addition of builtin operator overload
/// candidates. It provides shared state and utility methods used throughout
/// the process, as well as a helper method to add each group of builtin
/// operator overloads from the standard to a candidate set.
class BuiltinOperatorOverloadBuilder {
// Common instance state available to all overload candidate addition methods.
Sema &S;
ArrayRef<Expr *> Args;
Qualifiers VisibleTypeConversionsQuals;
bool HasArithmeticOrEnumeralCandidateType;
SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes;
OverloadCandidateSet &CandidateSet;
static constexpr int ArithmeticTypesCap = 24;
SmallVector<CanQualType, ArithmeticTypesCap> ArithmeticTypes;
// Define some indices used to iterate over the arithmetic types in
// ArithmeticTypes. The "promoted arithmetic types" are the arithmetic
// types that are preserved by promotion (C++ [over.built]p2).
unsigned FirstIntegralType,
LastIntegralType;
unsigned FirstPromotedIntegralType,
LastPromotedIntegralType;
unsigned FirstPromotedArithmeticType,
LastPromotedArithmeticType;
unsigned NumArithmeticTypes;
void InitArithmeticTypes() {
// Start of promoted types.
FirstPromotedArithmeticType = 0;
ArithmeticTypes.push_back(S.Context.FloatTy);
ArithmeticTypes.push_back(S.Context.DoubleTy);
ArithmeticTypes.push_back(S.Context.LongDoubleTy);
if (S.Context.getTargetInfo().hasFloat128Type())
ArithmeticTypes.push_back(S.Context.Float128Ty);
// Start of integral types.
FirstIntegralType = ArithmeticTypes.size();
FirstPromotedIntegralType = ArithmeticTypes.size();
ArithmeticTypes.push_back(S.Context.IntTy);
ArithmeticTypes.push_back(S.Context.LongTy);
ArithmeticTypes.push_back(S.Context.LongLongTy);
if (S.Context.getTargetInfo().hasInt128Type())
ArithmeticTypes.push_back(S.Context.Int128Ty);
ArithmeticTypes.push_back(S.Context.UnsignedIntTy);
ArithmeticTypes.push_back(S.Context.UnsignedLongTy);
ArithmeticTypes.push_back(S.Context.UnsignedLongLongTy);
if (S.Context.getTargetInfo().hasInt128Type())
ArithmeticTypes.push_back(S.Context.UnsignedInt128Ty);
LastPromotedIntegralType = ArithmeticTypes.size();
LastPromotedArithmeticType = ArithmeticTypes.size();
// End of promoted types.
ArithmeticTypes.push_back(S.Context.BoolTy);
ArithmeticTypes.push_back(S.Context.CharTy);
ArithmeticTypes.push_back(S.Context.WCharTy);
if (S.Context.getLangOpts().Char8)
ArithmeticTypes.push_back(S.Context.Char8Ty);
ArithmeticTypes.push_back(S.Context.Char16Ty);
ArithmeticTypes.push_back(S.Context.Char32Ty);
ArithmeticTypes.push_back(S.Context.SignedCharTy);
ArithmeticTypes.push_back(S.Context.ShortTy);
ArithmeticTypes.push_back(S.Context.UnsignedCharTy);
ArithmeticTypes.push_back(S.Context.UnsignedShortTy);
LastIntegralType = ArithmeticTypes.size();
NumArithmeticTypes = ArithmeticTypes.size();
// End of integral types.
// FIXME: What about complex? What about half?
assert(ArithmeticTypes.size() <= ArithmeticTypesCap &&
"Enough inline storage for all arithmetic types.");
}
/// Helper method to factor out the common pattern of adding overloads
/// for '++' and '--' builtin operators.
void addPlusPlusMinusMinusStyleOverloads(QualType CandidateTy,
bool HasVolatile,
bool HasRestrict) {
QualType ParamTypes[2] = {
S.Context.getLValueReferenceType(CandidateTy),
S.Context.IntTy
};
// Non-volatile version.
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
// Use a heuristic to reduce number of builtin candidates in the set:
// add volatile version only if there are conversions to a volatile type.
if (HasVolatile) {
ParamTypes[0] =
S.Context.getLValueReferenceType(
S.Context.getVolatileType(CandidateTy));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
// Add restrict version only if there are conversions to a restrict type
// and our candidate type is a non-restrict-qualified pointer.
if (HasRestrict && CandidateTy->isAnyPointerType() &&
!CandidateTy.isRestrictQualified()) {
ParamTypes[0]
= S.Context.getLValueReferenceType(
S.Context.getCVRQualifiedType(CandidateTy, Qualifiers::Restrict));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
if (HasVolatile) {
ParamTypes[0]
= S.Context.getLValueReferenceType(
S.Context.getCVRQualifiedType(CandidateTy,
(Qualifiers::Volatile |
Qualifiers::Restrict)));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
}
public:
BuiltinOperatorOverloadBuilder(
Sema &S, ArrayRef<Expr *> Args,
Qualifiers VisibleTypeConversionsQuals,
bool HasArithmeticOrEnumeralCandidateType,
SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes,
OverloadCandidateSet &CandidateSet)
: S(S), Args(Args),
VisibleTypeConversionsQuals(VisibleTypeConversionsQuals),
HasArithmeticOrEnumeralCandidateType(
HasArithmeticOrEnumeralCandidateType),
CandidateTypes(CandidateTypes),
CandidateSet(CandidateSet) {
InitArithmeticTypes();
}
// Increment is deprecated for bool since C++17.
//
// C++ [over.built]p3:
//
// For every pair (T, VQ), where T is an arithmetic type other
// than bool, and VQ is either volatile or empty, there exist
// candidate operator functions of the form
//
// VQ T& operator++(VQ T&);
// T operator++(VQ T&, int);
//
// C++ [over.built]p4:
//
// For every pair (T, VQ), where T is an arithmetic type other
// than bool, and VQ is either volatile or empty, there exist
// candidate operator functions of the form
//
// VQ T& operator--(VQ T&);
// T operator--(VQ T&, int);
void addPlusPlusMinusMinusArithmeticOverloads(OverloadedOperatorKind Op) {
if (!HasArithmeticOrEnumeralCandidateType)
return;
for (unsigned Arith = 0; Arith < NumArithmeticTypes; ++Arith) {
const auto TypeOfT = ArithmeticTypes[Arith];
if (TypeOfT == S.Context.BoolTy) {
if (Op == OO_MinusMinus)
continue;
if (Op == OO_PlusPlus && S.getLangOpts().CPlusPlus17)
continue;
}
addPlusPlusMinusMinusStyleOverloads(
TypeOfT,
VisibleTypeConversionsQuals.hasVolatile(),
VisibleTypeConversionsQuals.hasRestrict());
}
}
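// For illustration (hypothetical user code, not part of this file), the bool
// special-casing above means:
//
//   bool b = false;
//   ++b; // no built-in candidate in C++17 (the bool form is skipped above),
//        // so the increment is ill-formed; before C++17 it is deprecated.
//   --b; // never valid: no built-in operator-- is provided for bool.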
// C++ [over.built]p5:
//
// For every pair (T, VQ), where T is a cv-qualified or
// cv-unqualified object type, and VQ is either volatile or
// empty, there exist candidate operator functions of the form
//
// T*VQ& operator++(T*VQ&);
// T*VQ& operator--(T*VQ&);
// T* operator++(T*VQ&, int);
// T* operator--(T*VQ&, int);
void addPlusPlusMinusMinusPointerOverloads() {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[0].pointer_begin(),
PtrEnd = CandidateTypes[0].pointer_end();
Ptr != PtrEnd; ++Ptr) {
// Skip pointer types that aren't pointers to object types.
if (!(*Ptr)->getPointeeType()->isObjectType())
continue;
addPlusPlusMinusMinusStyleOverloads(*Ptr,
(!(*Ptr).isVolatileQualified() &&
VisibleTypeConversionsQuals.hasVolatile()),
(!(*Ptr).isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()));
}
}
// C++ [over.built]p6:
// For every cv-qualified or cv-unqualified object type T, there
// exist candidate operator functions of the form
//
// T& operator*(T*);
//
// C++ [over.built]p7:
// For every function type T that does not have cv-qualifiers or a
// ref-qualifier, there exist candidate operator functions of the form
// T& operator*(T*);
void addUnaryStarPointerOverloads() {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[0].pointer_begin(),
PtrEnd = CandidateTypes[0].pointer_end();
Ptr != PtrEnd; ++Ptr) {
QualType ParamTy = *Ptr;
QualType PointeeTy = ParamTy->getPointeeType();
if (!PointeeTy->isObjectType() && !PointeeTy->isFunctionType())
continue;
if (const FunctionProtoType *Proto = PointeeTy->getAs<FunctionProtoType>())
if (Proto->getTypeQuals() || Proto->getRefQualifier())
continue;
S.AddBuiltinCandidate(&ParamTy, Args, CandidateSet);
}
}
// C++ [over.built]p9:
// For every promoted arithmetic type T, there exist candidate
// operator functions of the form
//
// T operator+(T);
// T operator-(T);
void addUnaryPlusOrMinusArithmeticOverloads() {
if (!HasArithmeticOrEnumeralCandidateType)
return;
for (unsigned Arith = FirstPromotedArithmeticType;
Arith < LastPromotedArithmeticType; ++Arith) {
QualType ArithTy = ArithmeticTypes[Arith];
S.AddBuiltinCandidate(&ArithTy, Args, CandidateSet);
}
// Extension: We also add these operators for vector types.
for (BuiltinCandidateTypeSet::iterator
Vec = CandidateTypes[0].vector_begin(),
VecEnd = CandidateTypes[0].vector_end();
Vec != VecEnd; ++Vec) {
QualType VecTy = *Vec;
S.AddBuiltinCandidate(&VecTy, Args, CandidateSet);
}
}
// C++ [over.built]p8:
// For every type T, there exist candidate operator functions of
// the form
//
// T* operator+(T*);
void addUnaryPlusPointerOverloads() {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[0].pointer_begin(),
PtrEnd = CandidateTypes[0].pointer_end();
Ptr != PtrEnd; ++Ptr) {
QualType ParamTy = *Ptr;
S.AddBuiltinCandidate(&ParamTy, Args, CandidateSet);
}
}
// C++ [over.built]p10:
// For every promoted integral type T, there exist candidate
// operator functions of the form
//
// T operator~(T);
void addUnaryTildePromotedIntegralOverloads() {
if (!HasArithmeticOrEnumeralCandidateType)
return;
for (unsigned Int = FirstPromotedIntegralType;
Int < LastPromotedIntegralType; ++Int) {
QualType IntTy = ArithmeticTypes[Int];
S.AddBuiltinCandidate(&IntTy, Args, CandidateSet);
}
// Extension: We also add this operator for vector types.
for (BuiltinCandidateTypeSet::iterator
Vec = CandidateTypes[0].vector_begin(),
VecEnd = CandidateTypes[0].vector_end();
Vec != VecEnd; ++Vec) {
QualType VecTy = *Vec;
S.AddBuiltinCandidate(&VecTy, Args, CandidateSet);
}
}
// C++ [over.match.oper]p16:
// For every pointer to member type T or type std::nullptr_t, there
// exist candidate operator functions of the form
//
// bool operator==(T,T);
// bool operator!=(T,T);
void addEqualEqualOrNotEqualMemberPointerOrNullptrOverloads() {
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
for (BuiltinCandidateTypeSet::iterator
MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
MemPtr != MemPtrEnd;
++MemPtr) {
// Don't add the same builtin candidate twice.
if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
continue;
QualType ParamTypes[2] = { *MemPtr, *MemPtr };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
if (CandidateTypes[ArgIdx].hasNullPtrType()) {
CanQualType NullPtrTy = S.Context.getCanonicalType(S.Context.NullPtrTy);
if (AddedTypes.insert(NullPtrTy).second) {
QualType ParamTypes[2] = { NullPtrTy, NullPtrTy };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
}
}
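// For illustration (hypothetical user code, not part of this file), the
// member pointer candidates above enable comparisons such as:
//
//   struct S { int m; };
//   int S::*pm = &S::m;
//   pm != nullptr; // uses the built-in bool operator!=(int S::*, int S::*)
//                  // after converting nullptr to the member pointer type.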
// C++ [over.built]p15:
//
// For every T, where T is an enumeration type or a pointer type,
// there exist candidate operator functions of the form
//
// bool operator<(T, T);
// bool operator>(T, T);
// bool operator<=(T, T);
// bool operator>=(T, T);
// bool operator==(T, T);
// bool operator!=(T, T);
// R operator<=>(T, T)
void addGenericBinaryPointerOrEnumeralOverloads() {
// C++ [over.match.oper]p3:
// [...]the built-in candidates include all of the candidate operator
// functions defined in 13.6 that, compared to the given operator, [...]
// do not have the same parameter-type-list as any non-template non-member
// candidate.
//
// Note that in practice, this only affects enumeration types because there
// aren't any built-in candidates of record type, and a user-defined operator
// must have an operand of record or enumeration type. Also, the only other
// overloaded operator with enumeration arguments, operator=,
// cannot be overloaded for enumeration types, so this is the only place
// where we must suppress candidates like this.
llvm::DenseSet<std::pair<CanQualType, CanQualType> >
UserDefinedBinaryOperators;
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
if (CandidateTypes[ArgIdx].enumeration_begin() !=
CandidateTypes[ArgIdx].enumeration_end()) {
for (OverloadCandidateSet::iterator C = CandidateSet.begin(),
CEnd = CandidateSet.end();
C != CEnd; ++C) {
if (!C->Viable || !C->Function || C->Function->getNumParams() != 2)
continue;
if (C->Function->isFunctionTemplateSpecialization())
continue;
QualType FirstParamType =
C->Function->getParamDecl(0)->getType().getUnqualifiedType();
QualType SecondParamType =
C->Function->getParamDecl(1)->getType().getUnqualifiedType();
// Skip if either parameter isn't of enumeral type.
if (!FirstParamType->isEnumeralType() ||
!SecondParamType->isEnumeralType())
continue;
// Add this operator to the set of known user-defined operators.
UserDefinedBinaryOperators.insert(
std::make_pair(S.Context.getCanonicalType(FirstParamType),
S.Context.getCanonicalType(SecondParamType)));
}
}
}
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[ArgIdx].pointer_begin(),
PtrEnd = CandidateTypes[ArgIdx].pointer_end();
Ptr != PtrEnd; ++Ptr) {
// Don't add the same builtin candidate twice.
if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = { *Ptr, *Ptr };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
for (BuiltinCandidateTypeSet::iterator
Enum = CandidateTypes[ArgIdx].enumeration_begin(),
EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
Enum != EnumEnd; ++Enum) {
CanQualType CanonType = S.Context.getCanonicalType(*Enum);
// Don't add the same builtin candidate twice, or if a user-defined
// candidate exists.
if (!AddedTypes.insert(CanonType).second ||
UserDefinedBinaryOperators.count(std::make_pair(CanonType,
CanonType)))
continue;
QualType ParamTypes[2] = { *Enum, *Enum };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
}
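// Illustrative example: given `enum E { e };` and a user-declared
// `bool operator<(E, E);`, the built-in candidate with the same
// parameter-type-list is suppressed by the logic above, so the call
// `e < e` is not ambiguous between the two.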
// C++ [over.built]p13:
//
// For every cv-qualified or cv-unqualified object type T
// there exist candidate operator functions of the form
//
// T* operator+(T*, ptrdiff_t);
// T& operator[](T*, ptrdiff_t); [BELOW]
// T* operator-(T*, ptrdiff_t);
// T* operator+(ptrdiff_t, T*);
// T& operator[](ptrdiff_t, T*); [BELOW]
//
// C++ [over.built]p14:
//
// For every T, where T is a pointer to object type, there
// exist candidate operator functions of the form
//
// ptrdiff_t operator-(T, T);
void addBinaryPlusOrMinusPointerOverloads(OverloadedOperatorKind Op) {
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (int Arg = 0; Arg < 2; ++Arg) {
QualType AsymmetricParamTypes[2] = {
S.Context.getPointerDiffType(),
S.Context.getPointerDiffType(),
};
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[Arg].pointer_begin(),
PtrEnd = CandidateTypes[Arg].pointer_end();
Ptr != PtrEnd; ++Ptr) {
QualType PointeeTy = (*Ptr)->getPointeeType();
if (!PointeeTy->isObjectType())
continue;
AsymmetricParamTypes[Arg] = *Ptr;
if (Arg == 0 || Op == OO_Plus) {
// operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
// T* operator+(ptrdiff_t, T*);
S.AddBuiltinCandidate(AsymmetricParamTypes, Args, CandidateSet);
}
if (Op == OO_Minus) {
// ptrdiff_t operator-(T, T);
if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = { *Ptr, *Ptr };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
}
}
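// Illustrative example: given `int *p, *q;`, `p + 2` uses
// `int* operator+(int*, ptrdiff_t)`, `2 + p` uses the mirrored
// `int* operator+(ptrdiff_t, int*)`, and `p - q` uses
// `ptrdiff_t operator-(int*, int*)`.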
// C++ [over.built]p12:
//
// For every pair of promoted arithmetic types L and R, there
// exist candidate operator functions of the form
//
// LR operator*(L, R);
// LR operator/(L, R);
// LR operator+(L, R);
// LR operator-(L, R);
// bool operator<(L, R);
// bool operator>(L, R);
// bool operator<=(L, R);
// bool operator>=(L, R);
// bool operator==(L, R);
// bool operator!=(L, R);
//
// where LR is the result of the usual arithmetic conversions
// between types L and R.
//
// C++ [over.built]p24:
//
// For every pair of promoted arithmetic types L and R, there exist
// candidate operator functions of the form
//
// LR operator?(bool, L, R);
//
// where LR is the result of the usual arithmetic conversions
// between types L and R.
// Our candidates ignore the first parameter.
void addGenericBinaryArithmeticOverloads() {
if (!HasArithmeticOrEnumeralCandidateType)
return;
for (unsigned Left = FirstPromotedArithmeticType;
Left < LastPromotedArithmeticType; ++Left) {
for (unsigned Right = FirstPromotedArithmeticType;
Right < LastPromotedArithmeticType; ++Right) {
QualType LandR[2] = { ArithmeticTypes[Left],
ArithmeticTypes[Right] };
S.AddBuiltinCandidate(LandR, Args, CandidateSet);
}
}
// Extension: Add the binary operators ==, !=, <, <=, >=, >, *, /, and the
// conditional operator for vector types.
for (BuiltinCandidateTypeSet::iterator
Vec1 = CandidateTypes[0].vector_begin(),
Vec1End = CandidateTypes[0].vector_end();
Vec1 != Vec1End; ++Vec1) {
for (BuiltinCandidateTypeSet::iterator
Vec2 = CandidateTypes[1].vector_begin(),
Vec2End = CandidateTypes[1].vector_end();
Vec2 != Vec2End; ++Vec2) {
QualType LandR[2] = { *Vec1, *Vec2 };
S.AddBuiltinCandidate(LandR, Args, CandidateSet);
}
}
}
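// Illustrative example: for `int i; double d;`, the expression `i + d`
// selects the candidate `LR operator+(int, double)`, where LR is double
// because the usual arithmetic conversions convert int to double.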
// C++2a [over.built]p14:
//
// For every integral type T there exists a candidate operator function
// of the form
//
// std::strong_ordering operator<=>(T, T)
//
// C++2a [over.built]p15:
//
// For every pair of floating-point types L and R, there exists a candidate
// operator function of the form
//
// std::partial_ordering operator<=>(L, R);
//
// FIXME: The current specification for integral types doesn't play nice with
// the direction of p0946r0, which allows mixed integral and unscoped-enum
// comparisons. Under the current spec this can lead to ambiguity during
// overload resolution. For example:
//
// enum A : int {a};
// auto x = (a <=> (long)42);
//
// error: call is ambiguous for arguments 'A' and 'long'.
// note: candidate operator<=>(int, int)
// note: candidate operator<=>(long, long)
//
// To avoid this error, this function deviates from the specification and adds
// the mixed overloads `operator<=>(L, R)` where L and R are promoted
// arithmetic types (the same as the generic relational overloads).
//
// For now this function acts as a placeholder.
void addThreeWayArithmeticOverloads() {
addGenericBinaryArithmeticOverloads();
}
// C++ [over.built]p17:
//
// For every pair of promoted integral types L and R, there
// exist candidate operator functions of the form
//
// LR operator%(L, R);
// LR operator&(L, R);
// LR operator^(L, R);
// LR operator|(L, R);
// L operator<<(L, R);
// L operator>>(L, R);
//
// where LR is the result of the usual arithmetic conversions
// between types L and R.
void addBinaryBitwiseArithmeticOverloads(OverloadedOperatorKind Op) {
if (!HasArithmeticOrEnumeralCandidateType)
return;
for (unsigned Left = FirstPromotedIntegralType;
Left < LastPromotedIntegralType; ++Left) {
for (unsigned Right = FirstPromotedIntegralType;
Right < LastPromotedIntegralType; ++Right) {
QualType LandR[2] = { ArithmeticTypes[Left],
ArithmeticTypes[Right] };
S.AddBuiltinCandidate(LandR, Args, CandidateSet);
}
}
}
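// Illustrative example: for `int i; long l;`, `i << l` considers
// `int operator<<(int, long)`; unlike %, &, ^ and |, the result type of
// the shift operators is the promoted left operand type L, not LR.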
// C++ [over.built]p20:
//
// For every pair (T, VQ), where T is an enumeration or
// pointer to member type and VQ is either volatile or
// empty, there exist candidate operator functions of the form
//
// VQ T& operator=(VQ T&, T);
void addAssignmentMemberPointerOrEnumeralOverloads() {
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
for (BuiltinCandidateTypeSet::iterator
Enum = CandidateTypes[ArgIdx].enumeration_begin(),
EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
Enum != EnumEnd; ++Enum) {
if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
continue;
AddBuiltinAssignmentOperatorCandidates(S, *Enum, Args, CandidateSet);
}
for (BuiltinCandidateTypeSet::iterator
MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
MemPtr != MemPtrEnd; ++MemPtr) {
if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
continue;
AddBuiltinAssignmentOperatorCandidates(S, *MemPtr, Args, CandidateSet);
}
}
}
// C++ [over.built]p19:
//
// For every pair (T, VQ), where T is any type and VQ is either
// volatile or empty, there exist candidate operator functions
// of the form
//
// T*VQ& operator=(T*VQ&, T*);
//
// C++ [over.built]p21:
//
// For every pair (T, VQ), where T is a cv-qualified or
// cv-unqualified object type and VQ is either volatile or
// empty, there exist candidate operator functions of the form
//
// T*VQ& operator+=(T*VQ&, ptrdiff_t);
// T*VQ& operator-=(T*VQ&, ptrdiff_t);
void addAssignmentPointerOverloads(bool isEqualOp) {
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[0].pointer_begin(),
PtrEnd = CandidateTypes[0].pointer_end();
Ptr != PtrEnd; ++Ptr) {
// If this is operator=, keep track of the builtin candidates we added.
if (isEqualOp)
AddedTypes.insert(S.Context.getCanonicalType(*Ptr));
else if (!(*Ptr)->getPointeeType()->isObjectType())
continue;
// non-volatile version
QualType ParamTypes[2] = {
S.Context.getLValueReferenceType(*Ptr),
isEqualOp ? *Ptr : S.Context.getPointerDiffType(),
};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
VisibleTypeConversionsQuals.hasVolatile();
if (NeedVolatile) {
// volatile version
ParamTypes[0] =
S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
if (!(*Ptr).isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
ParamTypes[0]
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
if (NeedVolatile) {
// volatile restrict version
ParamTypes[0]
= S.Context.getLValueReferenceType(
S.Context.getCVRQualifiedType(*Ptr,
(Qualifiers::Volatile |
Qualifiers::Restrict)));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
}
}
if (isEqualOp) {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[1].pointer_begin(),
PtrEnd = CandidateTypes[1].pointer_end();
Ptr != PtrEnd; ++Ptr) {
// Make sure we don't add the same candidate twice.
if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = {
S.Context.getLValueReferenceType(*Ptr),
*Ptr,
};
// non-volatile version
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
bool NeedVolatile = !(*Ptr).isVolatileQualified() &&
VisibleTypeConversionsQuals.hasVolatile();
if (NeedVolatile) {
// volatile version
ParamTypes[0] =
S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
}
if (!(*Ptr).isRestrictQualified() &&
VisibleTypeConversionsQuals.hasRestrict()) {
// restrict version
ParamTypes[0]
= S.Context.getLValueReferenceType(S.Context.getRestrictType(*Ptr));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
if (NeedVolatile) {
// volatile restrict version
ParamTypes[0]
= S.Context.getLValueReferenceType(
S.Context.getCVRQualifiedType(*Ptr,
(Qualifiers::Volatile |
Qualifiers::Restrict)));
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/true);
}
}
}
}
}
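// Illustrative example: given `int *p, *q;`, `p = q` uses
// `int*& operator=(int*&, int*)` and `p += 2` uses
// `int*& operator+=(int*&, ptrdiff_t)`; the volatile/restrict variants
// above only participate when such qualifiers are visible on the
// operand types.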
// C++ [over.built]p18:
//
// For every triple (L, VQ, R), where L is an arithmetic type,
// VQ is either volatile or empty, and R is a promoted
// arithmetic type, there exist candidate operator functions of
// the form
//
// VQ L& operator=(VQ L&, R);
// VQ L& operator*=(VQ L&, R);
// VQ L& operator/=(VQ L&, R);
// VQ L& operator+=(VQ L&, R);
// VQ L& operator-=(VQ L&, R);
void addAssignmentArithmeticOverloads(bool isEqualOp) {
if (!HasArithmeticOrEnumeralCandidateType)
return;
for (unsigned Left = 0; Left < NumArithmeticTypes; ++Left) {
for (unsigned Right = FirstPromotedArithmeticType;
Right < LastPromotedArithmeticType; ++Right) {
QualType ParamTypes[2];
ParamTypes[1] = ArithmeticTypes[Right];
// Add this built-in operator as a candidate (VQ is empty).
ParamTypes[0] =
S.Context.getLValueReferenceType(ArithmeticTypes[Left]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
ParamTypes[0] =
S.Context.getVolatileType(ArithmeticTypes[Left]);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
}
}
// Extension: Add the binary operators =, +=, -=, *=, /= for vector types.
for (BuiltinCandidateTypeSet::iterator
Vec1 = CandidateTypes[0].vector_begin(),
Vec1End = CandidateTypes[0].vector_end();
Vec1 != Vec1End; ++Vec1) {
for (BuiltinCandidateTypeSet::iterator
Vec2 = CandidateTypes[1].vector_begin(),
Vec2End = CandidateTypes[1].vector_end();
Vec2 != Vec2End; ++Vec2) {
QualType ParamTypes[2];
ParamTypes[1] = *Vec2;
// Add this built-in operator as a candidate (VQ is empty).
ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
// Add this built-in operator as a candidate (VQ is 'volatile').
if (VisibleTypeConversionsQuals.hasVolatile()) {
ParamTypes[0] = S.Context.getVolatileType(*Vec1);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/isEqualOp);
}
}
}
}
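// Illustrative example: given `float f;`, `f += 1` considers
// `float& operator+=(float&, int)`, since R ranges over all promoted
// arithmetic types; no user-visible cast of the int argument is needed.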
// C++ [over.built]p22:
//
// For every triple (L, VQ, R), where L is an integral type, VQ
// is either volatile or empty, and R is a promoted integral
// type, there exist candidate operator functions of the form
//
// VQ L& operator%=(VQ L&, R);
// VQ L& operator<<=(VQ L&, R);
// VQ L& operator>>=(VQ L&, R);
// VQ L& operator&=(VQ L&, R);
// VQ L& operator^=(VQ L&, R);
// VQ L& operator|=(VQ L&, R);
void addAssignmentIntegralOverloads() {
if (!HasArithmeticOrEnumeralCandidateType)
return;
for (unsigned Left = FirstIntegralType; Left < LastIntegralType; ++Left) {
for (unsigned Right = FirstPromotedIntegralType;
Right < LastPromotedIntegralType; ++Right) {
QualType ParamTypes[2];
ParamTypes[1] = ArithmeticTypes[Right];
// Add this built-in operator as a candidate (VQ is empty).
ParamTypes[0] =
S.Context.getLValueReferenceType(ArithmeticTypes[Left]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
if (VisibleTypeConversionsQuals.hasVolatile()) {
// Add this built-in operator as a candidate (VQ is 'volatile').
ParamTypes[0] = ArithmeticTypes[Left];
ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
}
}
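// Illustrative example: given `unsigned u;`, `u <<= 3` considers
// `unsigned& operator<<=(unsigned&, int)`. Only integral L and promoted
// integral R participate here, so `float f; f %= 2;` finds no built-in
// candidate.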
// C++ [over.operator]p23:
//
// There also exist candidate operator functions of the form
//
// bool operator!(bool);
// bool operator&&(bool, bool);
// bool operator||(bool, bool);
void addExclaimOverload() {
QualType ParamTy = S.Context.BoolTy;
S.AddBuiltinCandidate(&ParamTy, Args, CandidateSet,
/*IsAssignmentOperator=*/false,
/*NumContextualBoolArguments=*/1);
}
void addAmpAmpOrPipePipeOverload() {
QualType ParamTypes[2] = { S.Context.BoolTy, S.Context.BoolTy };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
/*IsAssignmentOperator=*/false,
/*NumContextualBoolArguments=*/2);
}
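// Illustrative example: for a class C with `explicit operator bool() const`,
// `!c`, `c && c`, and `c || c` use these candidates after each operand is
// contextually converted to bool.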
// C++ [over.built]p13:
//
// For every cv-qualified or cv-unqualified object type T there
// exist candidate operator functions of the form
//
// T* operator+(T*, ptrdiff_t); [ABOVE]
// T& operator[](T*, ptrdiff_t);
// T* operator-(T*, ptrdiff_t); [ABOVE]
// T* operator+(ptrdiff_t, T*); [ABOVE]
// T& operator[](ptrdiff_t, T*);
void addSubscriptOverloads() {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[0].pointer_begin(),
PtrEnd = CandidateTypes[0].pointer_end();
Ptr != PtrEnd; ++Ptr) {
QualType ParamTypes[2] = { *Ptr, S.Context.getPointerDiffType() };
QualType PointeeType = (*Ptr)->getPointeeType();
if (!PointeeType->isObjectType())
continue;
// T& operator[](T*, ptrdiff_t)
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[1].pointer_begin(),
PtrEnd = CandidateTypes[1].pointer_end();
Ptr != PtrEnd; ++Ptr) {
QualType ParamTypes[2] = { S.Context.getPointerDiffType(), *Ptr };
QualType PointeeType = (*Ptr)->getPointeeType();
if (!PointeeType->isObjectType())
continue;
// T& operator[](ptrdiff_t, T*)
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
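// Illustrative example: given `int *p; ptrdiff_t i;`, both `p[i]` and
// `i[p]` match a built-in candidate and designate the same element,
// mirroring the equivalence of p[i] and *(p + i).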
// C++ [over.built]p11:
// For every quintuple (C1, C2, T, CV1, CV2), where C2 is a class type,
// C1 is the same type as C2 or is a derived class of C2, T is an object
// type or a function type, and CV1 and CV2 are cv-qualifier-seqs,
// there exist candidate operator functions of the form
//
// CV12 T& operator->*(CV1 C1*, CV2 T C2::*);
//
// where CV12 is the union of CV1 and CV2.
void addArrowStarOverloads() {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[0].pointer_begin(),
PtrEnd = CandidateTypes[0].pointer_end();
Ptr != PtrEnd; ++Ptr) {
QualType C1Ty = (*Ptr);
QualType C1;
QualifierCollector Q1;
C1 = QualType(Q1.strip(C1Ty->getPointeeType()), 0);
if (!isa<RecordType>(C1))
continue;
// Heuristic to reduce the number of builtin candidates in the set.
// Add volatile/restrict version only if there are conversions to a
// volatile/restrict type.
if (!VisibleTypeConversionsQuals.hasVolatile() && Q1.hasVolatile())
continue;
if (!VisibleTypeConversionsQuals.hasRestrict() && Q1.hasRestrict())
continue;
for (BuiltinCandidateTypeSet::iterator
MemPtr = CandidateTypes[1].member_pointer_begin(),
MemPtrEnd = CandidateTypes[1].member_pointer_end();
MemPtr != MemPtrEnd; ++MemPtr) {
const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
QualType C2 = QualType(mptr->getClass(), 0);
C2 = C2.getUnqualifiedType();
if (C1 != C2 && !S.IsDerivedFrom(CandidateSet.getLocation(), C1, C2))
break;
QualType ParamTypes[2] = { *Ptr, *MemPtr };
// build CV12 T&
QualType T = mptr->getPointeeType();
if (!VisibleTypeConversionsQuals.hasVolatile() &&
T.isVolatileQualified())
continue;
if (!VisibleTypeConversionsQuals.hasRestrict() &&
T.isRestrictQualified())
continue;
T = Q1.apply(S.Context, T);
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
}
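// Illustrative example: given `struct S { int m; } *ps;` and
// `int S::*pm = &S::m;`, the expression `ps->*pm` matches
// `int& operator->*(S*, int S::*)`; cv-qualifiers from the object
// pointer and the member type are combined into CV12 on the result.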
// Note that we don't consider the first argument, since it has been
// contextually converted to bool long ago. The candidates below are
// therefore added as binary.
//
// C++ [over.built]p25:
// For every type T, where T is a pointer, pointer-to-member, or scoped
// enumeration type, there exist candidate operator functions of the form
//
// T operator?(bool, T, T);
//
void addConditionalOperatorOverloads() {
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
for (BuiltinCandidateTypeSet::iterator
Ptr = CandidateTypes[ArgIdx].pointer_begin(),
PtrEnd = CandidateTypes[ArgIdx].pointer_end();
Ptr != PtrEnd; ++Ptr) {
if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)).second)
continue;
QualType ParamTypes[2] = { *Ptr, *Ptr };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
for (BuiltinCandidateTypeSet::iterator
MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
MemPtr != MemPtrEnd; ++MemPtr) {
if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)).second)
continue;
QualType ParamTypes[2] = { *MemPtr, *MemPtr };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
if (S.getLangOpts().CPlusPlus11) {
for (BuiltinCandidateTypeSet::iterator
Enum = CandidateTypes[ArgIdx].enumeration_begin(),
EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
Enum != EnumEnd; ++Enum) {
if (!(*Enum)->getAs<EnumType>()->getDecl()->isScoped())
continue;
if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)).second)
continue;
QualType ParamTypes[2] = { *Enum, *Enum };
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
}
}
}
}
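// Illustrative example: given `bool b; int *p, *q;`, the expression
// `b ? p : q` considers `int* operator?(bool, int*, int*)`, added above
// with only the two pointer parameter types since the bool condition has
// already been dealt with.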
};
} // end anonymous namespace
/// AddBuiltinOperatorCandidates - Add the appropriate built-in
/// operator overloads to the candidate set (C++ [over.built]), based
/// on the operator @p Op and the arguments given. For example, if the
/// operator is a binary '+', this routine might add "int
/// operator+(int, int)" to cover integer addition.
void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet) {
// Find all of the types that the arguments can convert to, but only
// if the operator we're looking at has built-in operator candidates
// that make use of these types. Also record whether we encounter non-record
// candidate types or either arithmetic or enumeral candidate types.
Qualifiers VisibleTypeConversionsQuals;
VisibleTypeConversionsQuals.addConst();
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx)
VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);
bool HasNonRecordCandidateType = false;
bool HasArithmeticOrEnumeralCandidateType = false;
SmallVector<BuiltinCandidateTypeSet, 2> CandidateTypes;
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
CandidateTypes.emplace_back(*this);
CandidateTypes[ArgIdx].AddTypesConvertedFrom(Args[ArgIdx]->getType(),
OpLoc,
true,
(Op == OO_Exclaim ||
Op == OO_AmpAmp ||
Op == OO_PipePipe),
VisibleTypeConversionsQuals);
HasNonRecordCandidateType = HasNonRecordCandidateType ||
CandidateTypes[ArgIdx].hasNonRecordTypes();
HasArithmeticOrEnumeralCandidateType =
HasArithmeticOrEnumeralCandidateType ||
CandidateTypes[ArgIdx].hasArithmeticOrEnumeralTypes();
}
// Exit early when no non-record types have been added to the candidate set
// for any of the arguments to the operator.
//
// We can't exit early for !, ||, or &&, since there we always have
// 'bool' overloads.
if (!HasNonRecordCandidateType &&
!(Op == OO_Exclaim || Op == OO_AmpAmp || Op == OO_PipePipe))
return;
// Set up an object to manage the common state for building overloads.
BuiltinOperatorOverloadBuilder OpBuilder(*this, Args,
VisibleTypeConversionsQuals,
HasArithmeticOrEnumeralCandidateType,
CandidateTypes, CandidateSet);
// Dispatch over the operation to add in only those overloads which apply.
switch (Op) {
case OO_None:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Expected an overloaded operator");
case OO_New:
case OO_Delete:
case OO_Array_New:
case OO_Array_Delete:
case OO_Call:
llvm_unreachable(
"Special operators don't use AddBuiltinOperatorCandidates");
case OO_Comma:
case OO_Arrow:
case OO_Coawait:
// C++ [over.match.oper]p3:
// -- For the operator ',', the unary operator '&', the
// operator '->', or the operator 'co_await', the
// built-in candidates set is empty.
break;
case OO_Plus: // '+' is either unary or binary
if (Args.size() == 1)
OpBuilder.addUnaryPlusPointerOverloads();
LLVM_FALLTHROUGH;
case OO_Minus: // '-' is either unary or binary
if (Args.size() == 1) {
OpBuilder.addUnaryPlusOrMinusArithmeticOverloads();
} else {
OpBuilder.addBinaryPlusOrMinusPointerOverloads(Op);
OpBuilder.addGenericBinaryArithmeticOverloads();
}
break;
case OO_Star: // '*' is either unary or binary
if (Args.size() == 1)
OpBuilder.addUnaryStarPointerOverloads();
else
OpBuilder.addGenericBinaryArithmeticOverloads();
break;
case OO_Slash:
OpBuilder.addGenericBinaryArithmeticOverloads();
break;
case OO_PlusPlus:
case OO_MinusMinus:
OpBuilder.addPlusPlusMinusMinusArithmeticOverloads(Op);
OpBuilder.addPlusPlusMinusMinusPointerOverloads();
break;
case OO_EqualEqual:
case OO_ExclaimEqual:
OpBuilder.addEqualEqualOrNotEqualMemberPointerOrNullptrOverloads();
LLVM_FALLTHROUGH;
case OO_Less:
case OO_Greater:
case OO_LessEqual:
case OO_GreaterEqual:
OpBuilder.addGenericBinaryPointerOrEnumeralOverloads();
OpBuilder.addGenericBinaryArithmeticOverloads();
break;
case OO_Spaceship:
OpBuilder.addGenericBinaryPointerOrEnumeralOverloads();
OpBuilder.addThreeWayArithmeticOverloads();
break;
case OO_Percent:
case OO_Caret:
case OO_Pipe:
case OO_LessLess:
case OO_GreaterGreater:
OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
break;
case OO_Amp: // '&' is either unary or binary
if (Args.size() == 1)
// C++ [over.match.oper]p3:
// -- For the operator ',', the unary operator '&', or the
// operator '->', the built-in candidates set is empty.
break;
OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
break;
case OO_Tilde:
OpBuilder.addUnaryTildePromotedIntegralOverloads();
break;
case OO_Equal:
OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
LLVM_FALLTHROUGH;
case OO_PlusEqual:
case OO_MinusEqual:
OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
LLVM_FALLTHROUGH;
case OO_StarEqual:
case OO_SlashEqual:
OpBuilder.addAssignmentArithmeticOverloads(Op == OO_Equal);
break;
case OO_PercentEqual:
case OO_LessLessEqual:
case OO_GreaterGreaterEqual:
case OO_AmpEqual:
case OO_CaretEqual:
case OO_PipeEqual:
OpBuilder.addAssignmentIntegralOverloads();
break;
case OO_Exclaim:
OpBuilder.addExclaimOverload();
break;
case OO_AmpAmp:
case OO_PipePipe:
OpBuilder.addAmpAmpOrPipePipeOverload();
break;
case OO_Subscript:
OpBuilder.addSubscriptOverloads();
break;
case OO_ArrowStar:
OpBuilder.addArrowStarOverloads();
break;
case OO_Conditional:
OpBuilder.addConditionalOperatorOverloads();
OpBuilder.addGenericBinaryArithmeticOverloads();
break;
}
}
/// Add function candidates found via argument-dependent lookup
/// to the set of overloading candidates.
///
/// This routine performs argument-dependent name lookup based on the
/// given function name (which may also be an operator name) and adds
/// all of the overload candidates found by ADL to the overload
/// candidate set (C++ [basic.lookup.argdep]).
void
Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading) {
ADLResult Fns;
// FIXME: This approach for uniquing ADL results (and removing
// redundant candidates from the set) relies on pointer-equality,
// which means we need to key off the canonical decl. However,
// always going back to the canonical decl might not get us the
// right set of default arguments. What default arguments are
// we supposed to consider on ADL candidates, anyway?
// FIXME: Pass in the explicit template arguments?
ArgumentDependentLookup(Name, Loc, Args, Fns);
// Erase all of the candidates we already knew about.
for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
CandEnd = CandidateSet.end();
Cand != CandEnd; ++Cand)
if (Cand->Function) {
Fns.erase(Cand->Function);
if (FunctionTemplateDecl *FunTmpl = Cand->Function->getPrimaryTemplate())
Fns.erase(FunTmpl);
}
// For each of the ADL candidates we found, add it to the overload
// set.
for (ADLResult::iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
DeclAccessPair FoundDecl = DeclAccessPair::make(*I, AS_none);
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
if (ExplicitTemplateArgs)
continue;
AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet, false,
PartialOverloading);
} else
AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I),
FoundDecl, ExplicitTemplateArgs,
Args, CandidateSet, PartialOverloading);
}
}
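// Illustrative example: with `namespace N { struct X {}; void f(X); }`,
// the unqualified call `f(N::X{})` finds N::f through ADL even though no
// using-declaration brings it into scope; it is added here unless an
// identical candidate was already in the set.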
namespace {
enum class Comparison { Equal, Better, Worse };
}
/// Compares the enable_if attributes of two FunctionDecls, for the purposes of
/// overload resolution.
///
/// Cand1's set of enable_if attributes is said to be "better" than Cand2's iff
/// Cand1's first N enable_if attributes have precisely the same conditions as
/// Cand2's first N enable_if attributes (where N = the number of enable_if
/// attributes on Cand2), and Cand1 has more than N enable_if attributes.
///
/// Note that you can have a pair of candidates such that Cand1's enable_if
/// attributes are worse than Cand2's, and Cand2's enable_if attributes are
/// worse than Cand1's.
static Comparison compareEnableIfAttrs(const Sema &S, const FunctionDecl *Cand1,
const FunctionDecl *Cand2) {
// Common case: One (or both) decls don't have enable_if attrs.
bool Cand1Attr = Cand1->hasAttr<EnableIfAttr>();
bool Cand2Attr = Cand2->hasAttr<EnableIfAttr>();
if (!Cand1Attr || !Cand2Attr) {
if (Cand1Attr == Cand2Attr)
return Comparison::Equal;
return Cand1Attr ? Comparison::Better : Comparison::Worse;
}
// FIXME: The next several lines are just
// specific_attr_iterator<EnableIfAttr> but going in declaration order,
// instead of reverse order, which is how they're stored in the AST.
auto Cand1Attrs = getOrderedEnableIfAttrs(Cand1);
auto Cand2Attrs = getOrderedEnableIfAttrs(Cand2);
// It's impossible for Cand1 to be better than (or equal to) Cand2 if Cand1
// has fewer enable_if attributes than Cand2.
if (Cand1Attrs.size() < Cand2Attrs.size())
return Comparison::Worse;
auto Cand1I = Cand1Attrs.begin();
llvm::FoldingSetNodeID Cand1ID, Cand2ID;
for (auto &Cand2A : Cand2Attrs) {
Cand1ID.clear();
Cand2ID.clear();
auto &Cand1A = *Cand1I++;
Cand1A->getCond()->Profile(Cand1ID, S.getASTContext(), true);
Cand2A->getCond()->Profile(Cand2ID, S.getASTContext(), true);
if (Cand1ID != Cand2ID)
return Comparison::Worse;
}
return Cand1I == Cand1Attrs.end() ? Comparison::Equal : Comparison::Better;
}
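// Illustrative sketch: if Cand2 carries [enable_if(A)] and Cand1 carries
// [enable_if(A), enable_if(B)] in declaration order, Cand1's first
// attribute matches all of Cand2's and Cand1 has more, so Cand1 is
// Better; with [enable_if(B), enable_if(A)] on Cand1 the prefix check
// fails and Cand1 is Worse.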
static bool isBetterMultiversionCandidate(const OverloadCandidate &Cand1,
const OverloadCandidate &Cand2) {
if (!Cand1.Function || !Cand1.Function->isMultiVersion() || !Cand2.Function ||
!Cand2.Function->isMultiVersion())
return false;
// If this is a cpu_dispatch/cpu_specific multiversion situation, prefer
// cpu_dispatch; otherwise, order arbitrarily based on the identifiers.
bool Cand1CPUDisp = Cand1.Function->hasAttr<CPUDispatchAttr>();
bool Cand2CPUDisp = Cand2.Function->hasAttr<CPUDispatchAttr>();
const auto *Cand1CPUSpec = Cand1.Function->getAttr<CPUSpecificAttr>();
const auto *Cand2CPUSpec = Cand2.Function->getAttr<CPUSpecificAttr>();
if (!Cand1CPUDisp && !Cand2CPUDisp && !Cand1CPUSpec && !Cand2CPUSpec)
return false;
if (Cand1CPUDisp && !Cand2CPUDisp)
return true;
if (Cand2CPUDisp && !Cand1CPUDisp)
return false;
if (Cand1CPUSpec && Cand2CPUSpec) {
if (Cand1CPUSpec->cpus_size() != Cand2CPUSpec->cpus_size())
return Cand1CPUSpec->cpus_size() < Cand2CPUSpec->cpus_size();
std::pair<CPUSpecificAttr::cpus_iterator, CPUSpecificAttr::cpus_iterator>
FirstDiff = std::mismatch(
Cand1CPUSpec->cpus_begin(), Cand1CPUSpec->cpus_end(),
Cand2CPUSpec->cpus_begin(),
[](const IdentifierInfo *LHS, const IdentifierInfo *RHS) {
return LHS->getName() == RHS->getName();
});
assert(FirstDiff.first != Cand1CPUSpec->cpus_end() &&
"Two different cpu-specific versions should not have the same "
"identifier list, otherwise they'd be the same decl!");
return (*FirstDiff.first)->getName() < (*FirstDiff.second)->getName();
}
llvm_unreachable("No way to get here unless both had cpu_dispatch");
}
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool clang::isBetterOverloadCandidate(
Sema &S, const OverloadCandidate &Cand1, const OverloadCandidate &Cand2,
SourceLocation Loc, OverloadCandidateSet::CandidateSetKind Kind) {
// Define viable functions to be better candidates than non-viable
// functions.
if (!Cand2.Viable)
return Cand1.Viable;
else if (!Cand1.Viable)
return false;
// C++ [over.match.best]p1:
//
// -- if F is a static member function, ICS1(F) is defined such
// that ICS1(F) is neither better nor worse than ICS1(G) for
// any function G, and, symmetrically, ICS1(G) is neither
// better nor worse than ICS1(F).
unsigned StartArg = 0;
if (Cand1.IgnoreObjectArgument || Cand2.IgnoreObjectArgument)
StartArg = 1;
auto IsIllFormedConversion = [&](const ImplicitConversionSequence &ICS) {
// We don't allow incompatible pointer conversions in C++.
if (!S.getLangOpts().CPlusPlus)
return ICS.isStandard() &&
ICS.Standard.Second == ICK_Incompatible_Pointer_Conversion;
// The only ill-formed conversion we allow in C++ is the string literal to
// char* conversion, which is only considered ill-formed after C++11.
return S.getLangOpts().CPlusPlus11 && !S.getLangOpts().WritableStrings &&
hasDeprecatedStringLiteralToCharPtrConversion(ICS);
};
// Define functions that don't require ill-formed conversions for a given
// argument to be better candidates than functions that do.
unsigned NumArgs = Cand1.Conversions.size();
assert(Cand2.Conversions.size() == NumArgs && "Overload candidate mismatch");
bool HasBetterConversion = false;
for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
bool Cand1Bad = IsIllFormedConversion(Cand1.Conversions[ArgIdx]);
bool Cand2Bad = IsIllFormedConversion(Cand2.Conversions[ArgIdx]);
if (Cand1Bad != Cand2Bad) {
if (Cand1Bad)
return false;
HasBetterConversion = true;
}
}
if (HasBetterConversion)
return true;
// C++ [over.match.best]p1:
// A viable function F1 is defined to be a better function than another
// viable function F2 if for all arguments i, ICSi(F1) is not a worse
// conversion sequence than ICSi(F2), and then...
for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
switch (CompareImplicitConversionSequences(S, Loc,
Cand1.Conversions[ArgIdx],
Cand2.Conversions[ArgIdx])) {
case ImplicitConversionSequence::Better:
// Cand1 has a better conversion sequence.
HasBetterConversion = true;
break;
case ImplicitConversionSequence::Worse:
// Cand1 can't be better than Cand2.
return false;
case ImplicitConversionSequence::Indistinguishable:
// Do nothing.
break;
}
}
// -- for some argument j, ICSj(F1) is a better conversion sequence than
// ICSj(F2), or, if not that,
if (HasBetterConversion)
return true;
// -- the context is an initialization by user-defined conversion
// (see 8.5, 13.3.1.5) and the standard conversion sequence
// from the return type of F1 to the destination type (i.e.,
// the type of the entity being initialized) is a better
// conversion sequence than the standard conversion sequence
// from the return type of F2 to the destination type.
if (Kind == OverloadCandidateSet::CSK_InitByUserDefinedConversion &&
Cand1.Function && Cand2.Function &&
isa<CXXConversionDecl>(Cand1.Function) &&
isa<CXXConversionDecl>(Cand2.Function)) {
// First check whether we prefer one of the conversion functions over the
// other. This only distinguishes the results in non-standard, extension
// cases such as the conversion from a lambda closure type to a function
// pointer or block.
ImplicitConversionSequence::CompareKind Result =
compareConversionFunctions(S, Cand1.Function, Cand2.Function);
if (Result == ImplicitConversionSequence::Indistinguishable)
Result = CompareStandardConversionSequences(S, Loc,
Cand1.FinalConversion,
Cand2.FinalConversion);
if (Result != ImplicitConversionSequence::Indistinguishable)
return Result == ImplicitConversionSequence::Better;
// FIXME: Compare kind of reference binding if conversion functions
// convert to a reference type used in direct reference binding, per
// C++14 [over.match.best]p1 section 2 bullet 3.
}
// FIXME: Work around a defect in the C++17 guaranteed copy elision wording,
// as combined with the resolution to CWG issue 243.
//
// When the context is initialization by constructor ([over.match.ctor] or
// either phase of [over.match.list]), a constructor is preferred over
// a conversion function.
if (Kind == OverloadCandidateSet::CSK_InitByConstructor && NumArgs == 1 &&
Cand1.Function && Cand2.Function &&
isa<CXXConstructorDecl>(Cand1.Function) !=
isa<CXXConstructorDecl>(Cand2.Function))
return isa<CXXConstructorDecl>(Cand1.Function);
// -- F1 is a non-template function and F2 is a function template
// specialization, or, if not that,
bool Cand1IsSpecialization = Cand1.Function &&
Cand1.Function->getPrimaryTemplate();
bool Cand2IsSpecialization = Cand2.Function &&
Cand2.Function->getPrimaryTemplate();
if (Cand1IsSpecialization != Cand2IsSpecialization)
return Cand2IsSpecialization;
// -- F1 and F2 are function template specializations, and the function
// template for F1 is more specialized than the template for F2
// according to the partial ordering rules described in 14.5.5.2, or,
// if not that,
if (Cand1IsSpecialization && Cand2IsSpecialization) {
if (FunctionTemplateDecl *BetterTemplate
= S.getMoreSpecializedTemplate(Cand1.Function->getPrimaryTemplate(),
Cand2.Function->getPrimaryTemplate(),
Loc,
isa<CXXConversionDecl>(Cand1.Function)? TPOC_Conversion
: TPOC_Call,
Cand1.ExplicitCallArguments,
Cand2.ExplicitCallArguments))
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
}
// FIXME: Work around a defect in the C++17 inheriting constructor wording.
// A derived-class constructor beats an (inherited) base class constructor.
bool Cand1IsInherited =
dyn_cast_or_null<ConstructorUsingShadowDecl>(Cand1.FoundDecl.getDecl());
bool Cand2IsInherited =
dyn_cast_or_null<ConstructorUsingShadowDecl>(Cand2.FoundDecl.getDecl());
if (Cand1IsInherited != Cand2IsInherited)
return Cand2IsInherited;
else if (Cand1IsInherited) {
assert(Cand2IsInherited);
auto *Cand1Class = cast<CXXRecordDecl>(Cand1.Function->getDeclContext());
auto *Cand2Class = cast<CXXRecordDecl>(Cand2.Function->getDeclContext());
if (Cand1Class->isDerivedFrom(Cand2Class))
return true;
if (Cand2Class->isDerivedFrom(Cand1Class))
return false;
// Inherited from sibling base classes: still ambiguous.
}
// Check C++17 tie-breakers for deduction guides.
{
auto *Guide1 = dyn_cast_or_null<CXXDeductionGuideDecl>(Cand1.Function);
auto *Guide2 = dyn_cast_or_null<CXXDeductionGuideDecl>(Cand2.Function);
if (Guide1 && Guide2) {
// -- F1 is generated from a deduction-guide and F2 is not
if (Guide1->isImplicit() != Guide2->isImplicit())
return Guide2->isImplicit();
// -- F1 is the copy deduction candidate(16.3.1.8) and F2 is not
if (Guide1->isCopyDeductionCandidate())
return true;
}
}
// Check for enable_if value-based overload resolution.
if (Cand1.Function && Cand2.Function) {
Comparison Cmp = compareEnableIfAttrs(S, Cand1.Function, Cand2.Function);
if (Cmp != Comparison::Equal)
return Cmp == Comparison::Better;
}
if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function) {
FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
S.IdentifyCUDAPreference(Caller, Cand2.Function);
}
bool HasPS1 = Cand1.Function != nullptr &&
functionHasPassObjectSizeParams(Cand1.Function);
bool HasPS2 = Cand2.Function != nullptr &&
functionHasPassObjectSizeParams(Cand2.Function);
if (HasPS1 != HasPS2 && HasPS1)
return true;
return isBetterMultiversionCandidate(Cand1, Cand2);
}
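// Illustrative example: given `void g(int);` and
// `template <class T> void g(T);`, the call `g(0)` selects the
// non-template g(int): the conversion sequences tie, so the
// "non-template beats template specialization" tie-breaker above decides.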
/// Determine whether two declarations are "equivalent" for the purposes of
/// name lookup and overload resolution. This applies when the same internal/no
/// linkage entity is defined by two modules (probably by textually including
/// the same header). In such a case, we don't consider the declarations to
/// declare the same entity, but we also don't want lookups with both
/// declarations visible to be ambiguous in some cases (this happens when using
/// a modularized libstdc++).
bool Sema::isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B) {
auto *VA = dyn_cast_or_null<ValueDecl>(A);
auto *VB = dyn_cast_or_null<ValueDecl>(B);
if (!VA || !VB)
return false;
// The declarations must be declaring the same name as an internal linkage
// entity in different modules.
if (!VA->getDeclContext()->getRedeclContext()->Equals(
VB->getDeclContext()->getRedeclContext()) ||
getOwningModule(const_cast<ValueDecl *>(VA)) ==
getOwningModule(const_cast<ValueDecl *>(VB)) ||
VA->isExternallyVisible() || VB->isExternallyVisible())
return false;
// Check that the declarations appear to be equivalent.
//
// FIXME: Checking the type isn't really enough to resolve the ambiguity.
// For constants and functions, we should check the initializer or body is
// the same. For non-constant variables, we shouldn't allow it at all.
if (Context.hasSameType(VA->getType(), VB->getType()))
return true;
// Enum constants within unnamed enumerations will have different types, but
// may still be similar enough to be interchangeable for our purposes.
if (auto *EA = dyn_cast<EnumConstantDecl>(VA)) {
if (auto *EB = dyn_cast<EnumConstantDecl>(VB)) {
// Only handle anonymous enums. If the enumerations were named and
// equivalent, they would have been merged to the same type.
auto *EnumA = cast<EnumDecl>(EA->getDeclContext());
auto *EnumB = cast<EnumDecl>(EB->getDeclContext());
if (EnumA->hasNameForLinkage() || EnumB->hasNameForLinkage() ||
!Context.hasSameType(EnumA->getIntegerType(),
EnumB->getIntegerType()))
return false;
// Allow this only if the value is the same for both enumerators.
return llvm::APSInt::isSameValue(EA->getInitVal(), EB->getInitVal());
}
}
// Nothing else is sufficiently similar.
return false;
}
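// Illustrative example: if two modules each textually include a header
// defining `static const int Limit = 3;`, lookups that can see both
// declarations treat them as equivalent here instead of diagnosing an
// ambiguity.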
void Sema::diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv) {
Diag(Loc, diag::ext_equivalent_internal_linkage_decl_in_modules) << D;
Module *M = getOwningModule(const_cast<NamedDecl*>(D));
Diag(D->getLocation(), diag::note_equivalent_internal_linkage_decl)
<< !M << (M ? M->getFullModuleName() : "");
for (auto *E : Equiv) {
Module *M = getOwningModule(const_cast<NamedDecl*>(E));
Diag(E->getLocation(), diag::note_equivalent_internal_linkage_decl)
<< !M << (M ? M->getFullModuleName() : "");
}
}
/// Computes the best viable function (C++ 13.3.3)
/// within an overload candidate set.
///
/// \param Loc The location of the function name (or operator symbol) for
/// which overload resolution occurs.
///
/// \param Best If overload resolution was successful or found a deleted
/// function, \p Best points to the candidate function found.
///
/// \returns The result of overload resolution.
OverloadingResult
OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
iterator &Best) {
llvm::SmallVector<OverloadCandidate *, 16> Candidates;
std::transform(begin(), end(), std::back_inserter(Candidates),
[](OverloadCandidate &Cand) { return &Cand; });
// [CUDA] HD->H or HD->D calls are technically not allowed by CUDA but
// are accepted by both clang and NVCC. However, in a particular
// compilation mode only one call variant is viable. We need to
// exclude non-viable overload candidates from consideration based
// only on their host/device attributes. Specifically, if one
// candidate call is WrongSide and the other is SameSide, we ignore
// the WrongSide candidate.
if (S.getLangOpts().CUDA) {
const FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
bool ContainsSameSideCandidate =
llvm::any_of(Candidates, [&](OverloadCandidate *Cand) {
return Cand->Function &&
S.IdentifyCUDAPreference(Caller, Cand->Function) ==
Sema::CFP_SameSide;
});
if (ContainsSameSideCandidate) {
auto IsWrongSideCandidate = [&](OverloadCandidate *Cand) {
return Cand->Function &&
S.IdentifyCUDAPreference(Caller, Cand->Function) ==
Sema::CFP_WrongSide;
};
llvm::erase_if(Candidates, IsWrongSideCandidate);
}
}
// Find the best viable function.
Best = end();
for (auto *Cand : Candidates)
if (Cand->Viable)
if (Best == end() ||
isBetterOverloadCandidate(S, *Cand, *Best, Loc, Kind))
Best = Cand;
// If we didn't find any viable functions, abort.
if (Best == end())
return OR_No_Viable_Function;
llvm::SmallVector<const NamedDecl *, 4> EquivalentCands;
// Make sure that this function is better than every other viable
// function. If not, we have an ambiguity.
for (auto *Cand : Candidates) {
if (Cand->Viable && Cand != Best &&
!isBetterOverloadCandidate(S, *Best, *Cand, Loc, Kind)) {
if (S.isEquivalentInternalLinkageDeclaration(Best->Function,
Cand->Function)) {
EquivalentCands.push_back(Cand->Function);
continue;
}
Best = end();
return OR_Ambiguous;
}
}
// Best is the best viable function.
if (Best->Function &&
(Best->Function->isDeleted() ||
S.isFunctionConsideredUnavailable(Best->Function)))
return OR_Deleted;
if (!EquivalentCands.empty())
S.diagnoseEquivalentInternalLinkageDeclarations(Loc, Best->Function,
EquivalentCands);
return OR_Success;
}
namespace {
enum OverloadCandidateKind {
oc_function,
oc_method,
oc_constructor,
oc_implicit_default_constructor,
oc_implicit_copy_constructor,
oc_implicit_move_constructor,
oc_implicit_copy_assignment,
oc_implicit_move_assignment,
oc_inherited_constructor
};
enum OverloadCandidateSelect {
ocs_non_template,
ocs_template,
ocs_described_template,
};
static std::pair<OverloadCandidateKind, OverloadCandidateSelect>
ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
std::string &Description) {
bool isTemplate = Fn->isTemplateDecl() || Found->isTemplateDecl();
if (FunctionTemplateDecl *FunTmpl = Fn->getPrimaryTemplate()) {
isTemplate = true;
Description = S.getTemplateArgumentBindingsText(
FunTmpl->getTemplateParameters(), *Fn->getTemplateSpecializationArgs());
}
OverloadCandidateSelect Select = [&]() {
if (!Description.empty())
return ocs_described_template;
return isTemplate ? ocs_template : ocs_non_template;
}();
OverloadCandidateKind Kind = [&]() {
if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
if (!Ctor->isImplicit()) {
if (isa<ConstructorUsingShadowDecl>(Found))
return oc_inherited_constructor;
else
return oc_constructor;
}
if (Ctor->isDefaultConstructor())
return oc_implicit_default_constructor;
if (Ctor->isMoveConstructor())
return oc_implicit_move_constructor;
assert(Ctor->isCopyConstructor() &&
"unexpected sort of implicit constructor");
return oc_implicit_copy_constructor;
}
if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
// This actually gets spelled 'candidate function' for now, but
// it doesn't hurt to split it out.
if (!Meth->isImplicit())
return oc_method;
if (Meth->isMoveAssignmentOperator())
return oc_implicit_move_assignment;
if (Meth->isCopyAssignmentOperator())
return oc_implicit_copy_assignment;
assert(isa<CXXConversionDecl>(Meth) && "expected conversion");
return oc_method;
}
return oc_function;
}();
return std::make_pair(Kind, Select);
}
void MaybeEmitInheritedConstructorNote(Sema &S, Decl *FoundDecl) {
// FIXME: It'd be nice to only emit a note once per using-decl per overload
// set.
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(FoundDecl))
S.Diag(FoundDecl->getLocation(),
diag::note_ovl_candidate_inherited_constructor)
<< Shadow->getNominatedBaseClass();
}
} // end anonymous namespace
static bool isFunctionAlwaysEnabled(const ASTContext &Ctx,
const FunctionDecl *FD) {
for (auto *EnableIf : FD->specific_attrs<EnableIfAttr>()) {
bool AlwaysTrue;
if (!EnableIf->getCond()->EvaluateAsBooleanCondition(AlwaysTrue, Ctx))
return false;
if (!AlwaysTrue)
return false;
}
return true;
}
/// Returns true if we can take the address of the function.
///
/// \param Complain - If true, we'll emit a diagnostic
/// \param InOverloadResolution - For the purposes of emitting a diagnostic, are
/// we in overload resolution?
/// \param Loc - The location of the statement we're complaining about. Ignored
/// if we're not complaining, or if we're in overload resolution.
static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
bool Complain,
bool InOverloadResolution,
SourceLocation Loc) {
if (!isFunctionAlwaysEnabled(S.Context, FD)) {
if (Complain) {
if (InOverloadResolution)
S.Diag(FD->getLocStart(),
diag::note_addrof_ovl_candidate_disabled_by_enable_if_attr);
else
S.Diag(Loc, diag::err_addrof_function_disabled_by_enable_if_attr) << FD;
}
return false;
}
auto I = llvm::find_if(FD->parameters(), [](const ParmVarDecl *P) {
return P->hasAttr<PassObjectSizeAttr>();
});
if (I == FD->param_end())
return true;
if (Complain) {
// Add one to ParamNo because it's user-facing
unsigned ParamNo = std::distance(FD->param_begin(), I) + 1;
if (InOverloadResolution)
S.Diag(FD->getLocation(),
diag::note_ovl_candidate_has_pass_object_size_params)
<< ParamNo;
else
S.Diag(Loc, diag::err_address_of_function_with_pass_object_size_params)
<< FD << ParamNo;
}
return false;
}
static bool checkAddressOfCandidateIsAvailable(Sema &S,
const FunctionDecl *FD) {
return checkAddressOfFunctionIsAvailable(S, FD, /*Complain=*/true,
/*InOverloadResolution=*/true,
/*Loc=*/SourceLocation());
}
bool Sema::checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain,
SourceLocation Loc) {
return ::checkAddressOfFunctionIsAvailable(*this, Function, Complain,
/*InOverloadResolution=*/false,
Loc);
}
// Notes the location of an overload candidate.
void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType, bool TakingAddress) {
if (TakingAddress && !checkAddressOfCandidateIsAvailable(*this, Fn))
return;
if (Fn->isMultiVersion() && Fn->hasAttr<TargetAttr>() &&
!Fn->getAttr<TargetAttr>()->isDefaultVersion())
return;
std::string FnDesc;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> KSPair =
ClassifyOverloadCandidate(*this, Found, Fn, FnDesc);
PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
<< (unsigned)KSPair.first << (unsigned)KSPair.second
<< Fn << FnDesc;
HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
Diag(Fn->getLocation(), PD);
MaybeEmitInheritedConstructorNote(*this, Found);
}
// Notes the location of all overload candidates designated through
// OverloadedExpr
void Sema::NoteAllOverloadCandidates(Expr *OverloadedExpr, QualType DestType,
bool TakingAddress) {
assert(OverloadedExpr->getType() == Context.OverloadTy);
OverloadExpr::FindResult Ovl = OverloadExpr::find(OverloadedExpr);
OverloadExpr *OvlExpr = Ovl.Expression;
for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
IEnd = OvlExpr->decls_end();
I != IEnd; ++I) {
if (FunctionTemplateDecl *FunTmpl =
dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
NoteOverloadCandidate(*I, FunTmpl->getTemplatedDecl(), DestType,
TakingAddress);
} else if (FunctionDecl *Fun
= dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
NoteOverloadCandidate(*I, Fun, DestType, TakingAddress);
}
}
}
/// Diagnoses an ambiguous conversion. The partial diagnostic is the
/// "lead" diagnostic; it will be given two arguments, the source and
/// target types of the conversion.
void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
Sema &S,
SourceLocation CaretLoc,
const PartialDiagnostic &PDiag) const {
S.Diag(CaretLoc, PDiag)
<< Ambiguous.getFromType() << Ambiguous.getToType();
// FIXME: The note limiting machinery is borrowed from
// OverloadCandidateSet::NoteCandidates; there's an opportunity for
// refactoring here.
const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
unsigned CandsShown = 0;
AmbiguousConversionSequence::const_iterator I, E;
for (I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) {
if (CandsShown >= 4 && ShowOverloads == Ovl_Best)
break;
++CandsShown;
S.NoteOverloadCandidate(I->first, I->second);
}
if (I != E)
S.Diag(SourceLocation(), diag::note_ovl_too_many_candidates) << int(E - I);
}
static void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand,
unsigned I, bool TakingCandidateAddress) {
const ImplicitConversionSequence &Conv = Cand->Conversions[I];
assert(Conv.isBad());
assert(Cand->Function && "for now, candidate must be a function");
FunctionDecl *Fn = Cand->Function;
// There's a conversion slot for the object argument if this is a
// non-constructor method. Note that 'I' corresponds to the
// conversion-slot index.
bool isObjectArgument = false;
if (isa<CXXMethodDecl>(Fn) && !isa<CXXConstructorDecl>(Fn)) {
if (I == 0)
isObjectArgument = true;
else
I--;
}
std::string FnDesc;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Cand->FoundDecl, Fn, FnDesc);
Expr *FromExpr = Conv.Bad.FromExpr;
QualType FromTy = Conv.Bad.getFromType();
QualType ToTy = Conv.Bad.getToType();
if (FromTy == S.Context.OverloadTy) {
assert(FromExpr && "overload set argument came from implicit argument?");
Expr *E = FromExpr->IgnoreParens();
if (isa<UnaryOperator>(E))
E = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
DeclarationName Name = cast<OverloadExpr>(E)->getName();
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_overload)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << ToTy
<< Name << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
// Do some hand-waving analysis to see if the non-viability is due
// to a qualifier mismatch.
CanQualType CFromTy = S.Context.getCanonicalType(FromTy);
CanQualType CToTy = S.Context.getCanonicalType(ToTy);
if (CanQual<ReferenceType> RT = CToTy->getAs<ReferenceType>())
CToTy = RT->getPointeeType();
else {
// TODO: detect and diagnose the full richness of const mismatches.
if (CanQual<PointerType> FromPT = CFromTy->getAs<PointerType>())
if (CanQual<PointerType> ToPT = CToTy->getAs<PointerType>()) {
CFromTy = FromPT->getPointeeType();
CToTy = ToPT->getPointeeType();
}
}
if (CToTy.getUnqualifiedType() == CFromTy.getUnqualifiedType() &&
!CToTy.isAtLeastAsQualifiedAs(CFromTy)) {
Qualifiers FromQs = CFromTy.getQualifiers();
Qualifiers ToQs = CToTy.getQualifiers();
if (FromQs.getAddressSpace() != ToQs.getAddressSpace()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< ToTy << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_ownership)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
<< (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_gc)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
<< (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (FromQs.hasUnaligned() != ToQs.hasUnaligned()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_unaligned)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< FromQs.hasUnaligned() << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
assert(CVR && "unexpected qualifiers mismatch");
if (isObjectArgument) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr_this)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< (CVR - 1);
} else {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< (CVR - 1) << I + 1;
}
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
// Special diagnostic for failure to convert an initializer list, since
// telling the user that it has type void is not useful.
if (FromExpr && isa<InitListExpr>(FromExpr)) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_list_argument)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< ToTy << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
// Diagnose references or pointers to incomplete types differently,
// since it's far from impossible that the incompleteness triggered
// the failure.
QualType TempFromTy = FromTy.getNonReferenceType();
if (const PointerType *PTy = TempFromTy->getAs<PointerType>())
TempFromTy = PTy->getPointeeType();
if (TempFromTy->isIncompleteType()) {
// Emit the generic diagnostic and, optionally, add the hints to it.
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv_incomplete)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< ToTy << (unsigned)isObjectArgument << I + 1
<< (unsigned)(Cand->Fix.Kind);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
// Diagnose base -> derived pointer conversions.
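// e.g. (illustrative): struct B {}; struct D : B {};
//   void f(D *);
//   B *b = nullptr; f(b);   // would need a base-to-derived conversion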
unsigned BaseToDerivedConversion = 0;
if (const PointerType *FromPtrTy = FromTy->getAs<PointerType>()) {
if (const PointerType *ToPtrTy = ToTy->getAs<PointerType>()) {
if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
FromPtrTy->getPointeeType()) &&
!FromPtrTy->getPointeeType()->isIncompleteType() &&
!ToPtrTy->getPointeeType()->isIncompleteType() &&
S.IsDerivedFrom(SourceLocation(), ToPtrTy->getPointeeType(),
FromPtrTy->getPointeeType()))
BaseToDerivedConversion = 1;
}
} else if (const ObjCObjectPointerType *FromPtrTy
= FromTy->getAs<ObjCObjectPointerType>()) {
if (const ObjCObjectPointerType *ToPtrTy
= ToTy->getAs<ObjCObjectPointerType>())
if (const ObjCInterfaceDecl *FromIface = FromPtrTy->getInterfaceDecl())
if (const ObjCInterfaceDecl *ToIface = ToPtrTy->getInterfaceDecl())
if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
FromPtrTy->getPointeeType()) &&
FromIface->isSuperClassOf(ToIface))
BaseToDerivedConversion = 2;
} else if (const ReferenceType *ToRefTy = ToTy->getAs<ReferenceType>()) {
if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
!FromTy->isIncompleteType() &&
!ToRefTy->getPointeeType()->isIncompleteType() &&
S.IsDerivedFrom(SourceLocation(), ToRefTy->getPointeeType(), FromTy)) {
BaseToDerivedConversion = 3;
} else if (ToTy->isLValueReferenceType() && !FromExpr->isLValue() &&
ToTy.getNonReferenceType().getCanonicalType() ==
FromTy.getNonReferenceType().getCanonicalType()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_lvalue)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (unsigned)isObjectArgument << I + 1
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange());
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
}
if (BaseToDerivedConversion) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_base_to_derived_conv)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< (BaseToDerivedConversion - 1) << FromTy << ToTy << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
if (isa<ObjCObjectPointerType>(CFromTy) &&
isa<PointerType>(CToTy)) {
Qualifiers FromQs = CFromTy.getQualifiers();
Qualifiers ToQs = CToTy.getQualifiers();
if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
<< FnDesc << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
<< FromTy << ToTy << (unsigned)isObjectArgument << I + 1;
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
}
if (TakingCandidateAddress &&
!checkAddressOfCandidateIsAvailable(S, Cand->Function))
return;
// Emit the generic diagnostic and, optionally, add the hints to it.
PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
FDiag << (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (FromExpr ? FromExpr->getSourceRange() : SourceRange()) << FromTy
<< ToTy << (unsigned)isObjectArgument << I + 1
<< (unsigned)(Cand->Fix.Kind);
// If we can fix the conversion, suggest the FixIts.
for (std::vector<FixItHint>::iterator HI = Cand->Fix.Hints.begin(),
HE = Cand->Fix.Hints.end(); HI != HE; ++HI)
FDiag << *HI;
S.Diag(Fn->getLocation(), FDiag);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
}
/// Additional arity mismatch diagnosis specific to function overload
/// candidates. This is not covered by the more general DiagnoseArityMismatch()
/// over a candidate in any candidate set.
static bool CheckArityMismatch(Sema &S, OverloadCandidate *Cand,
unsigned NumArgs) {
FunctionDecl *Fn = Cand->Function;
unsigned MinParams = Fn->getMinRequiredArguments();
// With invalid overloaded operators, it's possible that we think we
// have an arity mismatch when in fact it looks like we have the
// right number of arguments, because only overloaded operators have
// the weird behavior of overloading member and non-member functions.
// Just don't report anything.
if (Fn->isInvalidDecl() &&
Fn->getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
return true;
if (NumArgs < MinParams) {
assert((Cand->FailureKind == ovl_fail_too_few_arguments) ||
(Cand->FailureKind == ovl_fail_bad_deduction &&
Cand->DeductionFailure.Result == Sema::TDK_TooFewArguments));
} else {
assert((Cand->FailureKind == ovl_fail_too_many_arguments) ||
(Cand->FailureKind == ovl_fail_bad_deduction &&
Cand->DeductionFailure.Result == Sema::TDK_TooManyArguments));
}
return false;
}
/// General arity mismatch diagnosis over a candidate in a candidate set.
static void DiagnoseArityMismatch(Sema &S, NamedDecl *Found, Decl *D,
unsigned NumFormalArgs) {
assert(isa<FunctionDecl>(D) &&
"The templated declaration should at least be a function"
" when diagnosing bad template argument deduction due to too many"
" or too few arguments");
FunctionDecl *Fn = cast<FunctionDecl>(D);
// TODO: treat calls to a missing default constructor as a special case
const FunctionProtoType *FnTy = Fn->getType()->getAs<FunctionProtoType>();
unsigned MinParams = Fn->getMinRequiredArguments();
// at least / at most / exactly
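// e.g. (illustrative): for void f(int, int = 0):
//   f()        -> "requires at least 1 argument"
//   f(1, 2, 3) -> "requires at most 2 arguments"
// and for void f(int), f() -> "requires 1 argument" (exactly).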
unsigned mode, modeCount;
if (NumFormalArgs < MinParams) {
if (MinParams != FnTy->getNumParams() || FnTy->isVariadic() ||
FnTy->isTemplateVariadic())
mode = 0; // "at least"
else
mode = 2; // "exactly"
modeCount = MinParams;
} else {
if (MinParams != FnTy->getNumParams())
mode = 1; // "at most"
else
mode = 2; // "exactly"
modeCount = FnTy->getNumParams();
}
std::string Description;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Found, Fn, Description);
if (modeCount == 1 && Fn->getParamDecl(0)->getDeclName())
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity_one)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
<< Description << mode << Fn->getParamDecl(0) << NumFormalArgs;
else
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second
<< Description << mode << modeCount << NumFormalArgs;
MaybeEmitInheritedConstructorNote(S, Found);
}
/// Arity mismatch diagnosis specific to a function overload candidate.
static void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
unsigned NumFormalArgs) {
if (!CheckArityMismatch(S, Cand, NumFormalArgs))
DiagnoseArityMismatch(S, Cand->FoundDecl, Cand->Function, NumFormalArgs);
}
static TemplateDecl *getDescribedTemplate(Decl *Templated) {
if (TemplateDecl *TD = Templated->getDescribedTemplate())
return TD;
llvm_unreachable("Unsupported: Getting the described template declaration"
" for bad deduction diagnosis");
}
/// Diagnose a failed template-argument deduction.
static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
DeductionFailureInfo &DeductionFailure,
unsigned NumArgs,
bool TakingCandidateAddress) {
TemplateParameter Param = DeductionFailure.getTemplateParameter();
NamedDecl *ParamD;
(ParamD = Param.dyn_cast<TemplateTypeParmDecl*>()) ||
(ParamD = Param.dyn_cast<NonTypeTemplateParmDecl*>()) ||
(ParamD = Param.dyn_cast<TemplateTemplateParmDecl*>());
switch (DeductionFailure.Result) {
case Sema::TDK_Success:
llvm_unreachable("TDK_success while diagnosing bad deduction");
case Sema::TDK_Incomplete: {
assert(ParamD && "no parameter found for incomplete deduction result");
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_incomplete_deduction)
<< ParamD->getDeclName();
MaybeEmitInheritedConstructorNote(S, Found);
return;
}
case Sema::TDK_IncompletePack: {
assert(ParamD && "no parameter found for incomplete deduction result");
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_incomplete_deduction_pack)
<< ParamD->getDeclName()
<< (DeductionFailure.getFirstArg()->pack_size() + 1)
<< *DeductionFailure.getFirstArg();
MaybeEmitInheritedConstructorNote(S, Found);
return;
}
case Sema::TDK_Underqualified: {
assert(ParamD && "no parameter found for bad qualifiers deduction result");
TemplateTypeParmDecl *TParam = cast<TemplateTypeParmDecl>(ParamD);
QualType Param = DeductionFailure.getFirstArg()->getAsType();
// Param will have been canonicalized, but it should just be a
// qualified version of ParamD, so move the qualifiers to that.
QualifierCollector Qs;
Qs.strip(Param);
QualType NonCanonParam = Qs.apply(S.Context, TParam->getTypeForDecl());
assert(S.Context.hasSameType(Param, NonCanonParam));
// Arg has also been canonicalized, but there's nothing we can do
// about that. It also doesn't matter as much, because it won't
// have any template parameters in it (because deduction isn't
// done on dependent types).
QualType Arg = DeductionFailure.getSecondArg()->getAsType();
S.Diag(Templated->getLocation(), diag::note_ovl_candidate_underqualified)
<< ParamD->getDeclName() << Arg << NonCanonParam;
MaybeEmitInheritedConstructorNote(S, Found);
return;
}
case Sema::TDK_Inconsistent: {
assert(ParamD && "no parameter found for inconsistent deduction result");
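// A classic trigger (illustrative): template <class T> void f(T, T);
// called as f(1, 'a') deduces T as both 'int' and 'char'.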
int which = 0;
if (isa<TemplateTypeParmDecl>(ParamD))
which = 0;
else if (isa<NonTypeTemplateParmDecl>(ParamD)) {
// Deduction might have failed because we deduced arguments of two
// different types for a non-type template parameter.
// FIXME: Use a different TDK value for this.
QualType T1 =
DeductionFailure.getFirstArg()->getNonTypeTemplateArgumentType();
QualType T2 =
DeductionFailure.getSecondArg()->getNonTypeTemplateArgumentType();
if (!S.Context.hasSameType(T1, T2)) {
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_inconsistent_deduction_types)
<< ParamD->getDeclName() << *DeductionFailure.getFirstArg() << T1
<< *DeductionFailure.getSecondArg() << T2;
MaybeEmitInheritedConstructorNote(S, Found);
return;
}
which = 1;
} else {
which = 2;
}
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_inconsistent_deduction)
<< which << ParamD->getDeclName() << *DeductionFailure.getFirstArg()
<< *DeductionFailure.getSecondArg();
MaybeEmitInheritedConstructorNote(S, Found);
return;
}
case Sema::TDK_InvalidExplicitArguments:
assert(ParamD && "no parameter found for invalid explicit arguments");
if (ParamD->getDeclName())
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_explicit_arg_mismatch_named)
<< ParamD->getDeclName();
else {
int index = 0;
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ParamD))
index = TTP->getIndex();
else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(ParamD))
index = NTTP->getIndex();
else
index = cast<TemplateTemplateParmDecl>(ParamD)->getIndex();
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_explicit_arg_mismatch_unnamed)
<< (index + 1);
}
MaybeEmitInheritedConstructorNote(S, Found);
return;
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
DiagnoseArityMismatch(S, Found, Templated, NumArgs);
return;
case Sema::TDK_InstantiationDepth:
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_instantiation_depth);
MaybeEmitInheritedConstructorNote(S, Found);
return;
case Sema::TDK_SubstitutionFailure: {
// Format the template argument list into the argument string.
SmallString<128> TemplateArgString;
if (TemplateArgumentList *Args =
DeductionFailure.getTemplateArgumentList()) {
TemplateArgString = " ";
TemplateArgString += S.getTemplateArgumentBindingsText(
getDescribedTemplate(Templated)->getTemplateParameters(), *Args);
}
// If this candidate was disabled by enable_if, say so.
PartialDiagnosticAt *PDiag = DeductionFailure.getSFINAEDiagnostic();
if (PDiag && PDiag->second.getDiagID() ==
diag::err_typename_nested_not_found_enable_if) {
// FIXME: Use the source range of the condition, and the fully-qualified
// name of the enable_if template. These are both present in PDiag.
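// e.g. (illustrative):
//   template <class T,
//             class = typename std::enable_if<std::is_integral<T>::value>::type>
//   void f(T);
//   f(1.5);   // note: candidate template ignored: disabled by 'enable_if'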
S.Diag(PDiag->first, diag::note_ovl_candidate_disabled_by_enable_if)
<< "'enable_if'" << TemplateArgString;
return;
}
// We found a specific requirement that disabled the enable_if.
if (PDiag && PDiag->second.getDiagID() ==
diag::err_typename_nested_not_found_requirement) {
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_disabled_by_requirement)
<< PDiag->second.getStringArg(0) << TemplateArgString;
return;
}
// Format the SFINAE diagnostic into the argument string.
// FIXME: Add a general mechanism to include a PartialDiagnostic *'s
// formatted message in another diagnostic.
SmallString<128> SFINAEArgString;
SourceRange R;
if (PDiag) {
SFINAEArgString = ": ";
R = SourceRange(PDiag->first, PDiag->first);
PDiag->second.EmitToString(S.getDiagnostics(), SFINAEArgString);
}
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_substitution_failure)
<< TemplateArgString << SFINAEArgString << R;
MaybeEmitInheritedConstructorNote(S, Found);
return;
}
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested: {
// Format the template argument list into the argument string.
SmallString<128> TemplateArgString;
if (TemplateArgumentList *Args =
DeductionFailure.getTemplateArgumentList()) {
TemplateArgString = " ";
TemplateArgString += S.getTemplateArgumentBindingsText(
getDescribedTemplate(Templated)->getTemplateParameters(), *Args);
}
S.Diag(Templated->getLocation(), diag::note_ovl_candidate_deduced_mismatch)
<< (*DeductionFailure.getCallArgIndex() + 1)
<< *DeductionFailure.getFirstArg() << *DeductionFailure.getSecondArg()
<< TemplateArgString
<< (DeductionFailure.Result == Sema::TDK_DeducedMismatchNested);
break;
}
case Sema::TDK_NonDeducedMismatch: {
// FIXME: Provide a source location to indicate what we couldn't match.
TemplateArgument FirstTA = *DeductionFailure.getFirstArg();
TemplateArgument SecondTA = *DeductionFailure.getSecondArg();
if (FirstTA.getKind() == TemplateArgument::Template &&
SecondTA.getKind() == TemplateArgument::Template) {
TemplateName FirstTN = FirstTA.getAsTemplate();
TemplateName SecondTN = SecondTA.getAsTemplate();
if (FirstTN.getKind() == TemplateName::Template &&
SecondTN.getKind() == TemplateName::Template) {
if (FirstTN.getAsTemplateDecl()->getName() ==
SecondTN.getAsTemplateDecl()->getName()) {
// FIXME: This fixes a bad diagnostic where both templates are named
// the same. This particular case is a bit difficult since:
// 1) It is passed as a string to the diagnostic printer.
// 2) The diagnostic printer only attempts to find a better
// name for types, not decls.
// Ideally, this should be folded into the diagnostic printer.
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_non_deduced_mismatch_qualified)
<< FirstTN.getAsTemplateDecl() << SecondTN.getAsTemplateDecl();
return;
}
}
}
if (TakingCandidateAddress && isa<FunctionDecl>(Templated) &&
!checkAddressOfCandidateIsAvailable(S, cast<FunctionDecl>(Templated)))
return;
// FIXME: For generic lambda parameters, check if the function is a lambda
// call operator, and if so, emit a prettier and more informative
// diagnostic that mentions 'auto' and lambda in addition to
// (or instead of?) the canonical template type parameters.
S.Diag(Templated->getLocation(),
diag::note_ovl_candidate_non_deduced_mismatch)
<< FirstTA << SecondTA;
return;
}
// TODO: diagnose these individually, then kill off
// note_ovl_candidate_bad_deduction, which is uselessly vague.
case Sema::TDK_MiscellaneousDeductionFailure:
S.Diag(Templated->getLocation(), diag::note_ovl_candidate_bad_deduction);
MaybeEmitInheritedConstructorNote(S, Found);
return;
case Sema::TDK_CUDATargetMismatch:
S.Diag(Templated->getLocation(),
diag::note_cuda_ovl_candidate_target_mismatch);
return;
}
}
/// Diagnose a failed template-argument deduction, for function calls.
static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
unsigned NumArgs,
bool TakingCandidateAddress) {
unsigned TDK = Cand->DeductionFailure.Result;
if (TDK == Sema::TDK_TooFewArguments || TDK == Sema::TDK_TooManyArguments) {
if (CheckArityMismatch(S, Cand, NumArgs))
return;
}
DiagnoseBadDeduction(S, Cand->FoundDecl, Cand->Function, // pattern
Cand->DeductionFailure, NumArgs, TakingCandidateAddress);
}
/// CUDA: diagnose an invalid call across targets.
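/// e.g. (illustrative): a __host__ function calling an overload that is
/// only available as __device__ ends up here, and the note names both
/// the caller's and the callee's targets.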
static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Caller = cast<FunctionDecl>(S.CurContext);
FunctionDecl *Callee = Cand->Function;
Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller),
CalleeTarget = S.IdentifyCUDATarget(Callee);
std::string FnDesc;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Cand->FoundDecl, Callee, FnDesc);
S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target)
<< (unsigned)FnKindPair.first << (unsigned)ocs_non_template
<< FnDesc /* Ignored */
<< CalleeTarget << CallerTarget;
// This could be an implicit constructor for which we could not infer the
// target due to a collision. Diagnose that case.
CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Callee);
if (Meth != nullptr && Meth->isImplicit()) {
CXXRecordDecl *ParentClass = Meth->getParent();
Sema::CXXSpecialMember CSM;
switch (FnKindPair.first) {
default:
return;
case oc_implicit_default_constructor:
CSM = Sema::CXXDefaultConstructor;
break;
case oc_implicit_copy_constructor:
CSM = Sema::CXXCopyConstructor;
break;
case oc_implicit_move_constructor:
CSM = Sema::CXXMoveConstructor;
break;
case oc_implicit_copy_assignment:
CSM = Sema::CXXCopyAssignment;
break;
case oc_implicit_move_assignment:
CSM = Sema::CXXMoveAssignment;
break;
}
bool ConstRHS = false;
if (Meth->getNumParams()) {
if (const ReferenceType *RT =
Meth->getParamDecl(0)->getType()->getAs<ReferenceType>()) {
ConstRHS = RT->getPointeeType().isConstQualified();
}
}
S.inferCUDATargetForImplicitSpecialMember(ParentClass, CSM, Meth,
/* ConstRHS */ ConstRHS,
/* Diagnose */ true);
}
}
static void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Callee = Cand->Function;
EnableIfAttr *Attr = static_cast<EnableIfAttr*>(Cand->DeductionFailure.Data);
S.Diag(Callee->getLocation(),
diag::note_ovl_candidate_disabled_by_function_cond_attr)
<< Attr->getCond()->getSourceRange() << Attr->getMessage();
}
static void DiagnoseOpenCLExtensionDisabled(Sema &S, OverloadCandidate *Cand) {
FunctionDecl *Callee = Cand->Function;
S.Diag(Callee->getLocation(),
diag::note_ovl_candidate_disabled_by_extension);
}
/// Generates a 'note' diagnostic for an overload candidate. We've
/// already generated a primary error at the call site.
///
/// It really does need to be a single diagnostic with its caret
/// pointed at the candidate declaration. Yes, this creates some
/// major challenges of technical writing. Yes, this makes pointing
/// out problems with specific arguments quite awkward. It's still
/// better than generating twenty screens of text for every failed
/// overload.
///
/// It would be great to be able to express per-candidate problems
/// more richly for those diagnostic clients that cared, but we'd
/// still have to be just as careful with the default diagnostics.
static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
unsigned NumArgs,
bool TakingCandidateAddress) {
FunctionDecl *Fn = Cand->Function;
// Note deleted candidates, but only if they're viable.
if (Cand->Viable) {
if (Fn->isDeleted() || S.isFunctionConsideredUnavailable(Fn)) {
std::string FnDesc;
std::pair<OverloadCandidateKind, OverloadCandidateSelect> FnKindPair =
ClassifyOverloadCandidate(S, Cand->FoundDecl, Fn, FnDesc);
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_deleted)
<< (unsigned)FnKindPair.first << (unsigned)FnKindPair.second << FnDesc
<< (Fn->isDeleted() ? (Fn->isDeletedAsWritten() ? 1 : 2) : 0);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
// We don't really have anything else to say about viable candidates.
S.NoteOverloadCandidate(Cand->FoundDecl, Fn);
return;
}
switch (Cand->FailureKind) {
case ovl_fail_too_many_arguments:
case ovl_fail_too_few_arguments:
return DiagnoseArityMismatch(S, Cand, NumArgs);
case ovl_fail_bad_deduction:
return DiagnoseBadDeduction(S, Cand, NumArgs,
TakingCandidateAddress);
case ovl_fail_illegal_constructor: {
S.Diag(Fn->getLocation(), diag::note_ovl_candidate_illegal_constructor)
<< (Fn->getPrimaryTemplate() ? 1 : 0);
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
}
case ovl_fail_trivial_conversion:
case ovl_fail_bad_final_conversion:
case ovl_fail_final_conversion_not_exact:
return S.NoteOverloadCandidate(Cand->FoundDecl, Fn);
case ovl_fail_bad_conversion: {
unsigned I = (Cand->IgnoreObjectArgument ? 1 : 0);
for (unsigned N = Cand->Conversions.size(); I != N; ++I)
if (Cand->Conversions[I].isBad())
return DiagnoseBadConversion(S, Cand, I, TakingCandidateAddress);
// FIXME: this currently happens when we're called from SemaInit
// when user-conversion overload fails. Figure out how to handle
// those conditions and diagnose them well.
return S.NoteOverloadCandidate(Cand->FoundDecl, Fn);
}
case ovl_fail_bad_target:
return DiagnoseBadTarget(S, Cand);
case ovl_fail_enable_if:
return DiagnoseFailedEnableIfAttr(S, Cand);
case ovl_fail_ext_disabled:
return DiagnoseOpenCLExtensionDisabled(S, Cand);
case ovl_fail_inhctor_slice:
// It's generally not interesting to note copy/move constructors here.
if (cast<CXXConstructorDecl>(Fn)->isCopyOrMoveConstructor())
return;
S.Diag(Fn->getLocation(),
diag::note_ovl_candidate_inherited_constructor_slice)
<< (Fn->getPrimaryTemplate() ? 1 : 0)
<< Fn->getParamDecl(0)->getType()->isRValueReferenceType();
MaybeEmitInheritedConstructorNote(S, Cand->FoundDecl);
return;
case ovl_fail_addr_not_available: {
bool Available = checkAddressOfCandidateIsAvailable(S, Cand->Function);
(void)Available;
assert(!Available);
break;
}
case ovl_non_default_multiversion_function:
// Do nothing, these should simply be ignored.
break;
}
}
static void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
// Desugar the type of the surrogate down to a function type,
// retaining as many typedefs as possible while still showing
// the function type (and, therefore, its parameter types).
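// e.g. (illustrative):
//   using Fn = void (*)(int);
//   struct S { operator Fn(); };
//   S s; s("str");   // surrogate noted as type 'void (*)(int)'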
QualType FnType = Cand->Surrogate->getConversionType();
bool isLValueReference = false;
bool isRValueReference = false;
bool isPointer = false;
if (const LValueReferenceType *FnTypeRef =
FnType->getAs<LValueReferenceType>()) {
FnType = FnTypeRef->getPointeeType();
isLValueReference = true;
} else if (const RValueReferenceType *FnTypeRef =
FnType->getAs<RValueReferenceType>()) {
FnType = FnTypeRef->getPointeeType();
isRValueReference = true;
}
if (const PointerType *FnTypePtr = FnType->getAs<PointerType>()) {
FnType = FnTypePtr->getPointeeType();
isPointer = true;
}
// Desugar down to a function type.
FnType = QualType(FnType->getAs<FunctionType>(), 0);
// Reconstruct the pointer/reference as appropriate.
if (isPointer) FnType = S.Context.getPointerType(FnType);
if (isRValueReference) FnType = S.Context.getRValueReferenceType(FnType);
if (isLValueReference) FnType = S.Context.getLValueReferenceType(FnType);
S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
<< FnType;
}
static void NoteBuiltinOperatorCandidate(Sema &S, StringRef Opc,
SourceLocation OpLoc,
OverloadCandidate *Cand) {
assert(Cand->Conversions.size() <= 2 && "builtin operator is not binary");
std::string TypeStr("operator");
TypeStr += Opc;
TypeStr += "(";
TypeStr += Cand->BuiltinParamTypes[0].getAsString();
if (Cand->Conversions.size() == 1) {
TypeStr += ")";
S.Diag(OpLoc, diag::note_ovl_builtin_unary_candidate) << TypeStr;
} else {
TypeStr += ", ";
TypeStr += Cand->BuiltinParamTypes[1].getAsString();
TypeStr += ")";
S.Diag(OpLoc, diag::note_ovl_builtin_binary_candidate) << TypeStr;
}
}
static void NoteAmbiguousUserConversions(Sema &S, SourceLocation OpLoc,
OverloadCandidate *Cand) {
for (const ImplicitConversionSequence &ICS : Cand->Conversions) {
if (ICS.isBad()) break; // all meaningless after first invalid
if (!ICS.isAmbiguous()) continue;
ICS.DiagnoseAmbiguousConversion(
S, OpLoc, S.PDiag(diag::note_ambiguous_type_conversion));
}
}
static SourceLocation GetLocationForCandidate(const OverloadCandidate *Cand) {
if (Cand->Function)
return Cand->Function->getLocation();
if (Cand->IsSurrogate)
return Cand->Surrogate->getLocation();
return SourceLocation();
}
static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
switch ((Sema::TemplateDeductionResult)DFI.Result) {
case Sema::TDK_Success:
case Sema::TDK_NonDependentConversionFailure:
llvm_unreachable("non-deduction failure while diagnosing bad deduction");
case Sema::TDK_Invalid:
case Sema::TDK_Incomplete:
case Sema::TDK_IncompletePack:
return 1;
case Sema::TDK_Underqualified:
case Sema::TDK_Inconsistent:
return 2;
case Sema::TDK_SubstitutionFailure:
case Sema::TDK_DeducedMismatch:
case Sema::TDK_DeducedMismatchNested:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_MiscellaneousDeductionFailure:
case Sema::TDK_CUDATargetMismatch:
return 3;
case Sema::TDK_InstantiationDepth:
return 4;
case Sema::TDK_InvalidExplicitArguments:
return 5;
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
return 6;
}
llvm_unreachable("Unhandled deduction result");
}
namespace {
struct CompareOverloadCandidatesForDisplay {
Sema &S;
SourceLocation Loc;
size_t NumArgs;
OverloadCandidateSet::CandidateSetKind CSK;
CompareOverloadCandidatesForDisplay(
Sema &S, SourceLocation Loc, size_t NArgs,
OverloadCandidateSet::CandidateSetKind CSK)
: S(S), Loc(Loc), NumArgs(NArgs), CSK(CSK) {}
bool operator()(const OverloadCandidate *L,
const OverloadCandidate *R) {
// Fast-path this check.
if (L == R) return false;
// Order first by viability.
if (L->Viable) {
if (!R->Viable) return true;
// TODO: introduce a tri-valued comparison for overload
// candidates. Would be more worthwhile if we had a sort
// that could exploit it.
if (isBetterOverloadCandidate(S, *L, *R, SourceLocation(), CSK))
return true;
if (isBetterOverloadCandidate(S, *R, *L, SourceLocation(), CSK))
return false;
} else if (R->Viable)
return false;
assert(L->Viable == R->Viable);
// Criteria by which we can sort non-viable candidates:
if (!L->Viable) {
// 1. Arity mismatches come after other candidates.
if (L->FailureKind == ovl_fail_too_many_arguments ||
L->FailureKind == ovl_fail_too_few_arguments) {
if (R->FailureKind == ovl_fail_too_many_arguments ||
R->FailureKind == ovl_fail_too_few_arguments) {
int LDist = std::abs((int)L->getNumParams() - (int)NumArgs);
int RDist = std::abs((int)R->getNumParams() - (int)NumArgs);
if (LDist == RDist) {
if (L->FailureKind == R->FailureKind)
// Sort non-surrogates before surrogates.
return !L->IsSurrogate && R->IsSurrogate;
// Sort candidates requiring fewer parameters than there were
// arguments given after candidates requiring more parameters
// than there were arguments given.
return L->FailureKind == ovl_fail_too_many_arguments;
}
return LDist < RDist;
}
return false;
}
if (R->FailureKind == ovl_fail_too_many_arguments ||
R->FailureKind == ovl_fail_too_few_arguments)
return true;
// 2. Bad conversions come first and are ordered by the number
// of bad conversions and quality of good conversions.
if (L->FailureKind == ovl_fail_bad_conversion) {
if (R->FailureKind != ovl_fail_bad_conversion)
return true;
// The conversion that can be fixed with a smaller number of changes
// comes first.
unsigned numLFixes = L->Fix.NumConversionsFixed;
unsigned numRFixes = R->Fix.NumConversionsFixed;
numLFixes = (numLFixes == 0) ? UINT_MAX : numLFixes;
numRFixes = (numRFixes == 0) ? UINT_MAX : numRFixes;
if (numLFixes != numRFixes) {
return numLFixes < numRFixes;
}
// If there's any ordering between the defined conversions...
// FIXME: this might not be transitive.
assert(L->Conversions.size() == R->Conversions.size());
int leftBetter = 0;
unsigned I = (L->IgnoreObjectArgument || R->IgnoreObjectArgument);
for (unsigned E = L->Conversions.size(); I != E; ++I) {
switch (CompareImplicitConversionSequences(S, Loc,
L->Conversions[I],
R->Conversions[I])) {
case ImplicitConversionSequence::Better:
leftBetter++;
break;
case ImplicitConversionSequence::Worse:
leftBetter--;
break;
case ImplicitConversionSequence::Indistinguishable:
break;
}
}
if (leftBetter > 0) return true;
if (leftBetter < 0) return false;
} else if (R->FailureKind == ovl_fail_bad_conversion)
return false;
if (L->FailureKind == ovl_fail_bad_deduction) {
if (R->FailureKind != ovl_fail_bad_deduction)
return true;
if (L->DeductionFailure.Result != R->DeductionFailure.Result)
return RankDeductionFailure(L->DeductionFailure)
< RankDeductionFailure(R->DeductionFailure);
} else if (R->FailureKind == ovl_fail_bad_deduction)
return false;
// TODO: others?
}
// Sort everything else by location.
SourceLocation LLoc = GetLocationForCandidate(L);
SourceLocation RLoc = GetLocationForCandidate(R);
// Put candidates without locations (e.g. builtins) at the end.
if (LLoc.isInvalid()) return false;
if (RLoc.isInvalid()) return true;
return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
}
};
} // namespace
/// CompleteNonViableCandidate - Normally, overload resolution only
/// computes up to the first bad conversion. This completes the remaining
/// conversions for a non-viable candidate and produces the FixIt set if
/// possible.
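/// e.g. (illustrative): for void f(int, int) called as f(nullptr, 2),
/// resolution records only the bad conversion for argument 1; this
/// routine fills in the (fine) conversion for argument 2 so sorting and
/// FixIt generation can inspect every position.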
static void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
ArrayRef<Expr *> Args) {
assert(!Cand->Viable);
// Don't do anything on failures other than bad conversion.
if (Cand->FailureKind != ovl_fail_bad_conversion) return;
// We only want the FixIts if all the arguments can be corrected.
bool Unfixable = false;
// Use an implicit copy initialization to check conversion fixes.
Cand->Fix.setConversionChecker(TryCopyInitialization);
// Attempt to fix the bad conversion.
unsigned ConvCount = Cand->Conversions.size();
for (unsigned ConvIdx = (Cand->IgnoreObjectArgument ? 1 : 0); /**/;
++ConvIdx) {
assert(ConvIdx != ConvCount && "no bad conversion in candidate");
if (Cand->Conversions[ConvIdx].isInitialized() &&
Cand->Conversions[ConvIdx].isBad()) {
Unfixable = !Cand->TryToFixBadConversion(ConvIdx, S);
break;
}
}
// FIXME: this should probably be preserved from the overload
// operation somehow.
bool SuppressUserConversions = false;
unsigned ConvIdx = 0;
ArrayRef<QualType> ParamTypes;
if (Cand->IsSurrogate) {
QualType ConvType
= Cand->Surrogate->getConversionType().getNonReferenceType();
if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
ConvType = ConvPtrType->getPointeeType();
ParamTypes = ConvType->getAs<FunctionProtoType>()->getParamTypes();
// Conversion 0 is 'this', which doesn't have a corresponding argument.
ConvIdx = 1;
} else if (Cand->Function) {
ParamTypes =
Cand->Function->getType()->getAs<FunctionProtoType>()->getParamTypes();
if (isa<CXXMethodDecl>(Cand->Function) &&
!isa<CXXConstructorDecl>(Cand->Function)) {
// Conversion 0 is 'this', which doesn't have a corresponding argument.
ConvIdx = 1;
}
} else {
// Builtin operator.
assert(ConvCount <= 3);
ParamTypes = Cand->BuiltinParamTypes;
}
// Fill in the rest of the conversions.
for (unsigned ArgIdx = 0; ConvIdx != ConvCount; ++ConvIdx, ++ArgIdx) {
if (Cand->Conversions[ConvIdx].isInitialized()) {
// We've already checked this conversion.
} else if (ArgIdx < ParamTypes.size()) {
if (ParamTypes[ArgIdx]->isDependentType())
Cand->Conversions[ConvIdx].setAsIdentityConversion(
Args[ArgIdx]->getType());
else {
Cand->Conversions[ConvIdx] =
TryCopyInitialization(S, Args[ArgIdx], ParamTypes[ArgIdx],
SuppressUserConversions,
/*InOverloadResolution=*/true,
/*AllowObjCWritebackConversion=*/
S.getLangOpts().ObjCAutoRefCount);
// Store the FixIt in the candidate if it exists.
if (!Unfixable && Cand->Conversions[ConvIdx].isBad())
Unfixable = !Cand->TryToFixBadConversion(ConvIdx, S);
}
} else
Cand->Conversions[ConvIdx].setEllipsis();
}
}
/// When overload resolution fails, prints diagnostic messages containing the
/// candidates in the candidate set.
void OverloadCandidateSet::NoteCandidates(
Sema &S, OverloadCandidateDisplayKind OCD, ArrayRef<Expr *> Args,
StringRef Opc, SourceLocation OpLoc,
llvm::function_ref<bool(OverloadCandidate &)> Filter) {
// Sort the candidates by viability and position. Sorting directly would
// be prohibitive, so we make a set of pointers and sort those.
SmallVector<OverloadCandidate*, 32> Cands;
if (OCD == OCD_AllCandidates) Cands.reserve(size());
for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
if (!Filter(*Cand))
continue;
if (Cand->Viable)
Cands.push_back(Cand);
else if (OCD == OCD_AllCandidates) {
CompleteNonViableCandidate(S, Cand, Args);
if (Cand->Function || Cand->IsSurrogate)
Cands.push_back(Cand);
// Otherwise, this is a non-viable builtin candidate. We do not, in general,
// want to list every possible builtin candidate.
}
}
std::stable_sort(Cands.begin(), Cands.end(),
CompareOverloadCandidatesForDisplay(S, OpLoc, Args.size(), Kind));
bool ReportedAmbiguousConversions = false;
SmallVectorImpl<OverloadCandidate*>::iterator I, E;
const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
unsigned CandsShown = 0;
for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
OverloadCandidate *Cand = *I;
// Set an arbitrary limit on the number of candidate functions we'll spam
// the user with. FIXME: This limit should depend on details of the
// candidate list.
if (CandsShown >= 4 && ShowOverloads == Ovl_Best) {
break;
}
++CandsShown;
if (Cand->Function)
NoteFunctionCandidate(S, Cand, Args.size(),
/*TakingCandidateAddress=*/false);
else if (Cand->IsSurrogate)
NoteSurrogateCandidate(S, Cand);
else {
assert(Cand->Viable &&
"Non-viable built-in candidates are not added to Cands.");
// Generally we only see ambiguities including viable builtin
// operators if overload resolution got screwed up by an
// ambiguous user-defined conversion.
//
// FIXME: It's quite possible for different conversions to see
// different ambiguities, though.
if (!ReportedAmbiguousConversions) {
NoteAmbiguousUserConversions(S, OpLoc, Cand);
ReportedAmbiguousConversions = true;
}
// If this is a viable builtin, print it.
NoteBuiltinOperatorCandidate(S, Opc, OpLoc, Cand);
}
}
if (I != E)
S.Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
}
static SourceLocation
GetLocationForCandidate(const TemplateSpecCandidate *Cand) {
return Cand->Specialization ? Cand->Specialization->getLocation()
: SourceLocation();
}
namespace {
struct CompareTemplateSpecCandidatesForDisplay {
Sema &S;
CompareTemplateSpecCandidatesForDisplay(Sema &S) : S(S) {}
bool operator()(const TemplateSpecCandidate *L,
const TemplateSpecCandidate *R) {
// Fast-path this check.
if (L == R)
return false;
// Assuming that both candidates are not matches...
// Sort by the ranking of deduction failures.
if (L->DeductionFailure.Result != R->DeductionFailure.Result)
return RankDeductionFailure(L->DeductionFailure) <
RankDeductionFailure(R->DeductionFailure);
// Sort everything else by location.
SourceLocation LLoc = GetLocationForCandidate(L);
SourceLocation RLoc = GetLocationForCandidate(R);
// Put candidates without locations (e.g. builtins) at the end.
if (LLoc.isInvalid())
return false;
if (RLoc.isInvalid())
return true;
return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
}
};
} // namespace
/// Diagnose a template argument deduction failure.
/// We are treating these failures as overload failures due to bad
/// deductions.
void TemplateSpecCandidate::NoteDeductionFailure(Sema &S,
bool ForTakingAddress) {
DiagnoseBadDeduction(S, FoundDecl, Specialization, // pattern
DeductionFailure, /*NumArgs=*/0, ForTakingAddress);
}
void TemplateSpecCandidateSet::destroyCandidates() {
for (iterator i = begin(), e = end(); i != e; ++i) {
i->DeductionFailure.Destroy();
}
}
void TemplateSpecCandidateSet::clear() {
destroyCandidates();
Candidates.clear();
}
/// NoteCandidates - When no template specialization match is found, prints
/// diagnostic messages containing the non-matching specializations that form
/// the candidate set.
/// This is analogous to OverloadCandidateSet::NoteCandidates() with
/// OCD == OCD_AllCandidates and Cand->Viable == false.
void TemplateSpecCandidateSet::NoteCandidates(Sema &S, SourceLocation Loc) {
// Sort the candidates by position (assuming no candidate is a match).
// Sorting directly would be prohibitive, so we make a set of pointers
// and sort those.
SmallVector<TemplateSpecCandidate *, 32> Cands;
Cands.reserve(size());
for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
if (Cand->Specialization)
Cands.push_back(Cand);
// Otherwise, this is a non-matching builtin candidate. We do not,
// in general, want to list every possible builtin candidate.
}
llvm::sort(Cands.begin(), Cands.end(),
CompareTemplateSpecCandidatesForDisplay(S));
// FIXME: Perhaps rename OverloadsShown and getShowOverloads()
// for generalization purposes (?).
const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
SmallVectorImpl<TemplateSpecCandidate *>::iterator I, E;
unsigned CandsShown = 0;
for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
TemplateSpecCandidate *Cand = *I;
// Set an arbitrary limit on the number of candidates we'll spam
// the user with. FIXME: This limit should depend on details of the
// candidate list.
if (CandsShown >= 4 && ShowOverloads == Ovl_Best)
break;
++CandsShown;
assert(Cand->Specialization &&
"Non-matching built-in candidates are not added to Cands.");
Cand->NoteDeductionFailure(S, ForTakingAddress);
}
if (I != E)
S.Diag(Loc, diag::note_ovl_too_many_candidates) << int(E - I);
}
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
QualType Ret = PossiblyAFunctionType;
if (const PointerType *ToTypePtr =
PossiblyAFunctionType->getAs<PointerType>())
Ret = ToTypePtr->getPointeeType();
else if (const ReferenceType *ToTypeRef =
PossiblyAFunctionType->getAs<ReferenceType>())
Ret = ToTypeRef->getPointeeType();
else if (const MemberPointerType *MemTypePtr =
PossiblyAFunctionType->getAs<MemberPointerType>())
Ret = MemTypePtr->getPointeeType();
Ret = Context.getCanonicalType(Ret).getUnqualifiedType();
return Ret;
}
static bool completeFunctionType(Sema &S, FunctionDecl *FD, SourceLocation Loc,
bool Complain = true) {
if (S.getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
S.DeduceReturnType(FD, Loc, Complain))
return true;
auto *FPT = FD->getType()->castAs<FunctionProtoType>();
if (S.getLangOpts().CPlusPlus17 &&
isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
!S.ResolveExceptionSpec(Loc, FPT))
return true;
return false;
}
namespace {
// A helper class to help with address of function resolution
// - allows us to avoid passing around all those ugly parameters
class AddressOfFunctionResolver {
Sema& S;
Expr* SourceExpr;
const QualType& TargetType;
QualType TargetFunctionType; // Extracted function type from target type
bool Complain;
ASTContext& Context;
bool TargetTypeIsNonStaticMemberFunction;
bool FoundNonTemplateFunction;
bool StaticMemberFunctionFromBoundPointer;
bool HasComplained;
OverloadExpr::FindResult OvlExprInfo;
OverloadExpr *OvlExpr;
TemplateArgumentListInfo OvlExplicitTemplateArgs;
SmallVector<std::pair<DeclAccessPair, FunctionDecl*>, 4> Matches;
TemplateSpecCandidateSet FailedCandidates;
public:
AddressOfFunctionResolver(Sema &S, Expr *SourceExpr,
const QualType &TargetType, bool Complain)
: S(S), SourceExpr(SourceExpr), TargetType(TargetType),
Complain(Complain), Context(S.getASTContext()),
TargetTypeIsNonStaticMemberFunction(
!!TargetType->getAs<MemberPointerType>()),
FoundNonTemplateFunction(false),
StaticMemberFunctionFromBoundPointer(false),
HasComplained(false),
OvlExprInfo(OverloadExpr::find(SourceExpr)),
OvlExpr(OvlExprInfo.Expression),
FailedCandidates(OvlExpr->getNameLoc(), /*ForTakingAddress=*/true) {
ExtractUnqualifiedFunctionTypeFromTargetType();
if (TargetFunctionType->isFunctionType()) {
if (UnresolvedMemberExpr *UME = dyn_cast<UnresolvedMemberExpr>(OvlExpr))
if (!UME->isImplicitAccess() &&
!S.ResolveSingleFunctionTemplateSpecialization(UME))
StaticMemberFunctionFromBoundPointer = true;
} else if (OvlExpr->hasExplicitTemplateArgs()) {
DeclAccessPair dap;
if (FunctionDecl *Fn = S.ResolveSingleFunctionTemplateSpecialization(
OvlExpr, false, &dap)) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
if (!Method->isStatic()) {
// If the target type is a non-function type and the function found
// is a non-static member function, pretend as if that was the
// target, it's the only possible type to end up with.
TargetTypeIsNonStaticMemberFunction = true;
// And skip adding the function if it's not in the proper form.
// We'll diagnose this due to an empty set of functions.
if (!OvlExprInfo.HasFormOfMemberPointer)
return;
}
Matches.push_back(std::make_pair(dap, Fn));
}
return;
}
if (OvlExpr->hasExplicitTemplateArgs())
OvlExpr->copyTemplateArgumentsInto(OvlExplicitTemplateArgs);
if (FindAllFunctionsThatMatchTargetTypeExactly()) {
// C++ [over.over]p4:
// If more than one function is selected, [...]
if (Matches.size() > 1 && !eliminateSuboptimalOverloadCandidates()) {
if (FoundNonTemplateFunction)
EliminateAllTemplateMatches();
else
EliminateAllExceptMostSpecializedTemplate();
}
}
if (S.getLangOpts().CUDA && Matches.size() > 1)
EliminateSuboptimalCudaMatches();
}
bool hasComplained() const { return HasComplained; }
private:
bool candidateHasExactlyCorrectType(const FunctionDecl *FD) {
QualType Discard;
return Context.hasSameUnqualifiedType(TargetFunctionType, FD->getType()) ||
S.IsFunctionConversion(FD->getType(), TargetFunctionType, Discard);
}
/// \return true if A is considered a better overload candidate for the
/// desired type than B.
bool isBetterCandidate(const FunctionDecl *A, const FunctionDecl *B) {
// If A doesn't have exactly the correct type, we don't want to classify it
// as "better" than anything else. This way, the user is required to
// disambiguate for us if there are multiple candidates and no exact match.
return candidateHasExactlyCorrectType(A) &&
(!candidateHasExactlyCorrectType(B) ||
compareEnableIfAttrs(S, A, B) == Comparison::Better);
}
/// \return true if we were able to eliminate all but one overload candidate,
/// false otherwise.
bool eliminateSuboptimalOverloadCandidates() {
// Same algorithm as overload resolution -- one pass to pick the "best",
// another pass to be sure that nothing is better than the best.
auto Best = Matches.begin();
for (auto I = Matches.begin()+1, E = Matches.end(); I != E; ++I)
if (isBetterCandidate(I->second, Best->second))
Best = I;
const FunctionDecl *BestFn = Best->second;
auto IsBestOrInferiorToBest = [this, BestFn](
const std::pair<DeclAccessPair, FunctionDecl *> &Pair) {
return BestFn == Pair.second || isBetterCandidate(BestFn, Pair.second);
};
// Note: We explicitly leave Matches unmodified if there isn't a clear best
// option, so we can potentially give the user a better error
if (!std::all_of(Matches.begin(), Matches.end(), IsBestOrInferiorToBest))
return false;
Matches[0] = *Best;
Matches.resize(1);
return true;
}
bool isTargetTypeAFunction() const {
return TargetFunctionType->isFunctionType();
}
// [ToType] [Return]
// R (*)(A) --> R (A), IsNonStaticMemberFunction = false
// R (&)(A) --> R (A), IsNonStaticMemberFunction = false
// R (S::*)(A) --> R (A), IsNonStaticMemberFunction = true
void inline ExtractUnqualifiedFunctionTypeFromTargetType() {
TargetFunctionType = S.ExtractUnqualifiedFunctionType(TargetType);
}
// return true if any matching specializations were found
bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
const DeclAccessPair& CurAccessFunPair) {
if (CXXMethodDecl *Method
= dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
// Skip non-static function templates when converting to pointer, and
// static when converting to member pointer.
if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
return false;
}
else if (TargetTypeIsNonStaticMemberFunction)
return false;
// C++ [over.over]p2:
// If the name is a function template, template argument deduction is
// done (14.8.2.2), and if the argument deduction succeeds, the
// resulting template argument list is used to generate a single
// function template specialization, which is added to the set of
// overloaded functions considered.
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(FailedCandidates.getLocation());
if (Sema::TemplateDeductionResult Result
= S.DeduceTemplateArguments(FunctionTemplate,
&OvlExplicitTemplateArgs,
TargetFunctionType, Specialization,
Info, /*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
FailedCandidates.addCandidate()
.set(CurAccessFunPair, FunctionTemplate->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, Result, Info));
return false;
}
// Template argument deduction ensures that we have an exact match or
// compatible pointer-to-function arguments that would be adjusted by ICS.
// This function template specialization works.
assert(S.isSameOrCompatibleFunctionType(
Context.getCanonicalType(Specialization->getType()),
Context.getCanonicalType(TargetFunctionType)));
if (!S.checkAddressOfFunctionIsAvailable(Specialization))
return false;
Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
return true;
}
bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
const DeclAccessPair& CurAccessFunPair) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
// Skip non-static functions when converting to pointer, and static
// when converting to member pointer.
if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
return false;
}
else if (TargetTypeIsNonStaticMemberFunction)
return false;
if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
if (S.getLangOpts().CUDA)
if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
if (!Caller->isImplicit() && !S.IsAllowedCUDACall(Caller, FunDecl))
return false;
if (FunDecl->isMultiVersion()) {
const auto *TA = FunDecl->getAttr<TargetAttr>();
if (TA && !TA->isDefaultVersion())
return false;
}
// If any candidate has a placeholder return type, trigger its deduction
// now.
if (completeFunctionType(S, FunDecl, SourceExpr->getLocStart(),
Complain)) {
HasComplained |= Complain;
return false;
}
if (!S.checkAddressOfFunctionIsAvailable(FunDecl))
return false;
// If we're in C, we need to support types that aren't exactly identical.
if (!S.getLangOpts().CPlusPlus ||
candidateHasExactlyCorrectType(FunDecl)) {
Matches.push_back(std::make_pair(
CurAccessFunPair, cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
FoundNonTemplateFunction = true;
return true;
}
}
return false;
}
bool FindAllFunctionsThatMatchTargetTypeExactly() {
bool Ret = false;
// If the overload expression doesn't have the form of a pointer to
// member, don't try to convert it to a pointer-to-member type.
if (IsInvalidFormOfPointerToMemberFunction())
return false;
for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
E = OvlExpr->decls_end();
I != E; ++I) {
// Look through any using declarations to find the underlying function.
NamedDecl *Fn = (*I)->getUnderlyingDecl();
// C++ [over.over]p3:
// Non-member functions and static member functions match
// targets of type "pointer-to-function" or "reference-to-function."
// Nonstatic member functions match targets of
// type "pointer-to-member-function."
// Note that according to DR 247, the containing class does not matter.
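// e.g. (illustrative):
//   struct A { void f(); static void g(); };
//   void (A::*pm)() = &A::f;   // non-static: pointer-to-member target
//   void (*pf)() = &A::g;      // static: plain function pointer target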
if (FunctionTemplateDecl *FunctionTemplate
= dyn_cast<FunctionTemplateDecl>(Fn)) {
if (AddMatchingTemplateFunction(FunctionTemplate, I.getPair()))
Ret = true;
}
// If we have explicit template arguments supplied, skip non-templates.
else if (!OvlExpr->hasExplicitTemplateArgs() &&
AddMatchingNonTemplateFunction(Fn, I.getPair()))
Ret = true;
}
assert(Ret || Matches.empty());
return Ret;
}
void EliminateAllExceptMostSpecializedTemplate() {
// [...] and any given function template specialization F1 is
// eliminated if the set contains a second function template
// specialization whose function template is more specialized
// than the function template of F1 according to the partial
// ordering rules of 14.5.5.2.
// The algorithm specified above is quadratic. We instead use a
// two-pass algorithm (similar to the one used to identify the
// best viable function in an overload set) that identifies the
// best function template (if it exists).
UnresolvedSet<4> MatchesCopy; // TODO: avoid!
for (unsigned I = 0, E = Matches.size(); I != E; ++I)
MatchesCopy.addDecl(Matches[I].second, Matches[I].first.getAccess());
// TODO: It looks like FailedCandidates does not serve much purpose
// here, since the no_viable diagnostic has index 0.
UnresolvedSetIterator Result = S.getMostSpecialized(
MatchesCopy.begin(), MatchesCopy.end(), FailedCandidates,
SourceExpr->getLocStart(), S.PDiag(),
S.PDiag(diag::err_addr_ovl_ambiguous)
<< Matches[0].second->getDeclName(),
S.PDiag(diag::note_ovl_candidate)
<< (unsigned)oc_function << (unsigned)ocs_described_template,
Complain, TargetFunctionType);
if (Result != MatchesCopy.end()) {
// Make it the first and only element
Matches[0].first = Matches[Result - MatchesCopy.begin()].first;
Matches[0].second = cast<FunctionDecl>(*Result);
Matches.resize(1);
} else
HasComplained |= Complain;
}
void EliminateAllTemplateMatches() {
// [...] any function template specializations in the set are
// eliminated if the set also contains a non-template function, [...]
for (unsigned I = 0, N = Matches.size(); I != N; ) {
if (Matches[I].second->getPrimaryTemplate() == nullptr)
++I;
else {
Matches[I] = Matches[--N];
Matches.resize(N);
}
}
}
void EliminateSuboptimalCudaMatches() {
S.EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(S.CurContext), Matches);
}
public:
void ComplainNoMatchesFound() const {
assert(Matches.empty());
S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_no_viable)
<< OvlExpr->getName() << TargetFunctionType
<< OvlExpr->getSourceRange();
if (FailedCandidates.empty())
S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType,
/*TakingAddress=*/true);
else {
// We have some deduction failure messages. Use them to diagnose
// the function templates, and diagnose the non-template candidates
// normally.
for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
IEnd = OvlExpr->decls_end();
I != IEnd; ++I)
if (FunctionDecl *Fun =
dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()))
if (!functionHasPassObjectSizeParams(Fun))
S.NoteOverloadCandidate(*I, Fun, TargetFunctionType,
/*TakingAddress=*/true);
FailedCandidates.NoteCandidates(S, OvlExpr->getLocStart());
}
}
bool IsInvalidFormOfPointerToMemberFunction() const {
return TargetTypeIsNonStaticMemberFunction &&
!OvlExprInfo.HasFormOfMemberPointer;
}
void ComplainIsInvalidFormOfPointerToMemberFunction() const {
// TODO: Should we condition this on whether any functions might
// have matched, or is it more appropriate to do that in callers?
// TODO: a fixit wouldn't hurt.
S.Diag(OvlExpr->getNameLoc(), diag::err_addr_ovl_no_qualifier)
<< TargetType << OvlExpr->getSourceRange();
}
bool IsStaticMemberFunctionFromBoundPointer() const {
return StaticMemberFunctionFromBoundPointer;
}
void ComplainIsStaticMemberFunctionFromBoundPointer() const {
S.Diag(OvlExpr->getLocStart(),
diag::err_invalid_form_pointer_member_function)
<< OvlExpr->getSourceRange();
}
void ComplainOfInvalidConversion() const {
S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_not_func_ptrref)
<< OvlExpr->getName() << TargetType;
}
void ComplainMultipleMatchesFound() const {
assert(Matches.size() > 1);
S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous)
<< OvlExpr->getName()
<< OvlExpr->getSourceRange();
S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType,
/*TakingAddress=*/true);
}
bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); }
int getNumMatches() const { return Matches.size(); }
FunctionDecl* getMatchingFunctionDecl() const {
if (Matches.size() != 1) return nullptr;
return Matches[0].second;
}
const DeclAccessPair* getMatchingFunctionAccessPair() const {
if (Matches.size() != 1) return nullptr;
return &Matches[0].first;
}
};
} // namespace
/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
/// an overloaded function (C++ [over.over]), where @p From is an
/// expression with overloaded function type and @p ToType is the type
/// we're trying to resolve to. For example:
///
/// @code
/// int f(double);
/// int f(int);
///
/// int (*pfd)(double) = f; // selects f(double)
/// @endcode
///
/// This routine returns the resulting FunctionDecl if it could be
/// resolved, and NULL otherwise. When @p Complain is true, this
/// routine will emit diagnostics if there is an error.
FunctionDecl *
Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &FoundResult,
bool *pHadMultipleCandidates) {
assert(AddressOfExpr->getType() == Context.OverloadTy);
AddressOfFunctionResolver Resolver(*this, AddressOfExpr, TargetType,
Complain);
int NumMatches = Resolver.getNumMatches();
FunctionDecl *Fn = nullptr;
bool ShouldComplain = Complain && !Resolver.hasComplained();
if (NumMatches == 0 && ShouldComplain) {
if (Resolver.IsInvalidFormOfPointerToMemberFunction())
Resolver.ComplainIsInvalidFormOfPointerToMemberFunction();
else
Resolver.ComplainNoMatchesFound();
}
else if (NumMatches > 1 && ShouldComplain)
Resolver.ComplainMultipleMatchesFound();
else if (NumMatches == 1) {
Fn = Resolver.getMatchingFunctionDecl();
assert(Fn);
if (auto *FPT = Fn->getType()->getAs<FunctionProtoType>())
ResolveExceptionSpec(AddressOfExpr->getExprLoc(), FPT);
FoundResult = *Resolver.getMatchingFunctionAccessPair();
if (Complain) {
if (Resolver.IsStaticMemberFunctionFromBoundPointer())
Resolver.ComplainIsStaticMemberFunctionFromBoundPointer();
else
CheckAddressOfMemberAccess(AddressOfExpr, FoundResult);
}
}
if (pHadMultipleCandidates)
*pHadMultipleCandidates = Resolver.hadMultipleCandidates();
return Fn;
}
/// Given an expression that refers to an overloaded function, try to
/// resolve that function to a single function that can have its address taken.
/// This will modify `Pair` iff it returns non-null.
///
/// This routine can only realistically succeed if all but one of the
/// candidates in the overload set for SrcExpr cannot have their addresses
/// taken.
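///
/// e.g. (illustrative):
///   void f(int) __attribute__((enable_if(false, "never")));
///   void f(long);
///   auto *p = &f;   // only f(long) can have its address taken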
FunctionDecl *
Sema::resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &Pair) {
OverloadExpr::FindResult R = OverloadExpr::find(E);
OverloadExpr *Ovl = R.Expression;
FunctionDecl *Result = nullptr;
DeclAccessPair DAP;
// Don't use the AddressOfResolver because we're specifically looking for
// cases where we have one overload candidate that lacks
// enable_if/pass_object_size/...
for (auto I = Ovl->decls_begin(), E = Ovl->decls_end(); I != E; ++I) {
auto *FD = dyn_cast<FunctionDecl>(I->getUnderlyingDecl());
if (!FD)
return nullptr;
if (!checkAddressOfFunctionIsAvailable(FD))
continue;
// We have more than one result; quit.
if (Result)
return nullptr;
DAP = I.getPair();
Result = FD;
}
if (Result)
Pair = DAP;
return Result;
}
/// Given an overloaded function, tries to turn it into a non-overloaded
/// function reference using resolveAddressOfOnlyViableOverloadCandidate. This
/// will perform access checks, diagnose the use of the resultant decl, and, if
/// requested, potentially perform a function-to-pointer decay.
///
/// Returns false if resolveAddressOfOnlyViableOverloadCandidate fails.
/// Otherwise, returns true; note that diagnostics may still have been
/// emitted in that case.
bool Sema::resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion) {
Expr *E = SrcExpr.get();
assert(E->getType() == Context.OverloadTy && "SrcExpr must be an overload");
DeclAccessPair DAP;
FunctionDecl *Found = resolveAddressOfOnlyViableOverloadCandidate(E, DAP);
if (!Found || Found->isCPUDispatchMultiVersion() ||
Found->isCPUSpecificMultiVersion())
return false;
// Emitting multiple diagnostics for a function that is both inaccessible and
// unavailable is consistent with our behavior elsewhere. So, always check
// for both.
DiagnoseUseOfDecl(Found, E->getExprLoc());
CheckAddressOfMemberAccess(E, DAP);
Expr *Fixed = FixOverloadedFunctionReference(E, DAP, Found);
if (DoFunctionPointerConversion && Fixed->getType()->isFunctionType())
SrcExpr = DefaultFunctionArrayConversion(Fixed, /*Diagnose=*/false);
else
SrcExpr = Fixed;
return true;
}
/// Given an expression that refers to an overloaded function, try to
/// resolve that overloaded function expression down to a single function.
///
/// This routine can only resolve template-ids that refer to a single function
/// template, where that template-id refers to a single template whose template
/// arguments are either provided by the template-id or have defaults,
/// as described in C++0x [temp.arg.explicit]p3.
///
/// If no template-ids are found, no diagnostics are emitted and NULL is
/// returned.
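///
/// For example (illustrative):
/// \code
///   template <typename T> void f(T);
///   void (*p)(int) = &f<int>; // the template-id identifies the single
///                             // specialization 'f<int>(int)'
/// \endcode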
FunctionDecl *
Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain,
DeclAccessPair *FoundResult) {
// C++ [over.over]p1:
// [...] [Note: any redundant set of parentheses surrounding the
// overloaded function name is ignored (5.1). ]
// C++ [over.over]p1:
// [...] The overloaded function name can be preceded by the &
// operator.
// If we didn't actually find any template-ids, we're done.
if (!ovl->hasExplicitTemplateArgs())
return nullptr;
TemplateArgumentListInfo ExplicitTemplateArgs;
ovl->copyTemplateArgumentsInto(ExplicitTemplateArgs);
TemplateSpecCandidateSet FailedCandidates(ovl->getNameLoc());
// Look through all of the overloaded functions, searching for one
// whose type matches exactly.
FunctionDecl *Matched = nullptr;
for (UnresolvedSetIterator I = ovl->decls_begin(),
E = ovl->decls_end(); I != E; ++I) {
// C++0x [temp.arg.explicit]p3:
// [...] In contexts where deduction is done and fails, or in contexts
// where deduction is not done, if a template argument list is
// specified and it, along with any default template arguments,
// identifies a single function template specialization, then the
// template-id is an lvalue for the function template specialization.
FunctionTemplateDecl *FunctionTemplate
= cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl());
// C++ [over.over]p2:
// If the name is a function template, template argument deduction is
// done (14.8.2.2), and if the argument deduction succeeds, the
// resulting template argument list is used to generate a single
// function template specialization, which is added to the set of
// overloaded functions considered.
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(FailedCandidates.getLocation());
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
Specialization, Info,
/*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
// TODO: Actually use the failed-deduction info?
FailedCandidates.addCandidate()
.set(I.getPair(), FunctionTemplate->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, Result, Info));
continue;
}
assert(Specialization && "no specialization and no error?");
// Multiple matches; we can't resolve to a single declaration.
if (Matched) {
if (Complain) {
Diag(ovl->getExprLoc(), diag::err_addr_ovl_ambiguous)
<< ovl->getName();
NoteAllOverloadCandidates(ovl);
}
return nullptr;
}
Matched = Specialization;
if (FoundResult) *FoundResult = I.getPair();
}
if (Matched &&
completeFunctionType(*this, Matched, ovl->getExprLoc(), Complain))
return nullptr;
return Matched;
}
// Resolve and fix an overloaded expression that can be resolved
// because it identifies a single function template specialization.
//
// The last three arguments should only be supplied if complain = true.
//
// Returns true if it was logically possible to resolve the expression in
// this way, regardless of whether resolution actually succeeded. Always
// returns true if 'complain' is set.
bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr, bool doFunctionPointerConversion,
bool complain, SourceRange OpRangeForComplaining,
QualType DestTypeForComplaining,
unsigned DiagIDForComplaining) {
assert(SrcExpr.get()->getType() == Context.OverloadTy);
OverloadExpr::FindResult ovl = OverloadExpr::find(SrcExpr.get());
DeclAccessPair found;
ExprResult SingleFunctionExpression;
if (FunctionDecl *fn = ResolveSingleFunctionTemplateSpecialization(
ovl.Expression, /*complain*/ false, &found)) {
if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getLocStart())) {
SrcExpr = ExprError();
return true;
}
// It is only correct to resolve to an instance method if we're
// resolving a form that's permitted to be a pointer to member.
// Otherwise we'll end up making a bound member expression, which
// is illegal in all the contexts we resolve like this.
if (!ovl.HasFormOfMemberPointer &&
isa<CXXMethodDecl>(fn) &&
cast<CXXMethodDecl>(fn)->isInstance()) {
if (!complain) return false;
Diag(ovl.Expression->getExprLoc(),
diag::err_bound_member_function)
<< 0 << ovl.Expression->getSourceRange();
// TODO: I believe we only end up here if there's a mix of
// static and non-static candidates (otherwise the expression
// would have 'bound member' type, not 'overload' type).
// Ideally we would note which candidate was chosen and why
// the static candidates were rejected.
SrcExpr = ExprError();
return true;
}
// Fix the expression to refer to 'fn'.
SingleFunctionExpression =
FixOverloadedFunctionReference(SrcExpr.get(), found, fn);
// If desired, do function-to-pointer decay.
if (doFunctionPointerConversion) {
SingleFunctionExpression =
DefaultFunctionArrayLvalueConversion(SingleFunctionExpression.get());
if (SingleFunctionExpression.isInvalid()) {
SrcExpr = ExprError();
return true;
}
}
}
if (!SingleFunctionExpression.isUsable()) {
if (complain) {
Diag(OpRangeForComplaining.getBegin(), DiagIDForComplaining)
<< ovl.Expression->getName()
<< DestTypeForComplaining
<< OpRangeForComplaining
<< ovl.Expression->getQualifierLoc().getSourceRange();
NoteAllOverloadCandidates(SrcExpr.get());
SrcExpr = ExprError();
return true;
}
return false;
}
SrcExpr = SingleFunctionExpression;
return true;
}
/// Add a single candidate to the overload set.
static void AddOverloadedCallCandidate(Sema &S,
DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading,
bool KnownValid) {
NamedDecl *Callee = FoundDecl.getDecl();
if (isa<UsingShadowDecl>(Callee))
Callee = cast<UsingShadowDecl>(Callee)->getTargetDecl();
if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Callee)) {
if (ExplicitTemplateArgs) {
assert(!KnownValid && "Explicit template arguments?");
return;
}
// Prevent ill-formed function decls from being added as overload candidates.
if (!isa<FunctionProtoType>(Func->getType()->getAs<FunctionType>()))
return;
S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet,
/*SuppressUserConversions=*/false,
PartialOverloading);
return;
}
if (FunctionTemplateDecl *FuncTemplate
= dyn_cast<FunctionTemplateDecl>(Callee)) {
S.AddTemplateOverloadCandidate(FuncTemplate, FoundDecl,
ExplicitTemplateArgs, Args, CandidateSet,
/*SuppressUserConversions=*/false,
PartialOverloading);
return;
}
assert(!KnownValid && "unhandled case in overloaded call candidate");
}
/// Add the overload candidates named by callee and/or found by argument
/// dependent lookup to the given overload set.
void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading) {
#ifndef NDEBUG
// Verify that ArgumentDependentLookup is consistent with the rules
// in C++0x [basic.lookup.argdep]p3:
//
// Let X be the lookup set produced by unqualified lookup (3.4.1)
// and let Y be the lookup set produced by argument dependent
// lookup (defined as follows). If X contains
//
// -- a declaration of a class member, or
//
// -- a block-scope function declaration that is not a
// using-declaration, or
//
// -- a declaration that is neither a function or a function
// template
//
// then Y is empty.
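//
// For example (illustrative), in
//
//   namespace N { struct A {}; void f(A); }
//   void f(int);
//   void g(N::A a) { f(a); } // X = { ::f }, a non-member function, so
//                            // ADL also runs and finds N::f.
//
// whereas a block-scope declaration 'void f(N::A);' inside g() would
// suppress ADL entirely.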
if (ULE->requiresADL()) {
for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
E = ULE->decls_end(); I != E; ++I) {
assert(!(*I)->getDeclContext()->isRecord());
assert(isa<UsingShadowDecl>(*I) ||
!(*I)->getDeclContext()->isFunctionOrMethod());
assert((*I)->getUnderlyingDecl()->isFunctionOrFunctionTemplate());
}
}
#endif
// It would be nice to avoid this copy.
TemplateArgumentListInfo TABuffer;
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr;
if (ULE->hasExplicitTemplateArgs()) {
ULE->copyTemplateArgumentsInto(TABuffer);
ExplicitTemplateArgs = &TABuffer;
}
for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
E = ULE->decls_end(); I != E; ++I)
AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs, Args,
CandidateSet, PartialOverloading,
/*KnownValid*/ true);
if (ULE->requiresADL())
AddArgumentDependentLookupCandidates(ULE->getName(), ULE->getExprLoc(),
Args, ExplicitTemplateArgs,
CandidateSet, PartialOverloading);
}
/// Determine whether a declaration with the specified name could be moved into
/// a different namespace.
static bool canBeDeclaredInNamespace(const DeclarationName &Name) {
switch (Name.getCXXOverloadedOperator()) {
case OO_New: case OO_Array_New:
case OO_Delete: case OO_Array_Delete:
return false;
default:
return true;
}
}
/// Attempt to recover from an ill-formed use of a non-dependent name in a
/// template, where the non-dependent name was declared after the template
/// was defined. This is common in code written for compilers which do not
/// correctly implement two-phase name lookup.
///
/// Returns true if a viable candidate was found and a diagnostic was issued.
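///
/// For example (illustrative):
/// \code
///   namespace N { struct A {}; }
///   template <typename T> void g(T t) { f(t); }
///   void f(N::A); // declared after 'g', and not in namespace N
///   void h() { g(N::A()); } // lookup of 'f' fails at instantiation;
///                           // recover by finding ::f and suggesting that
///                           // it be declared in namespace N instead
/// \endcode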
static bool
DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
const CXXScopeSpec &SS, LookupResult &R,
OverloadCandidateSet::CandidateSetKind CSK,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args,
bool *DoDiagnoseEmptyLookup = nullptr) {
if (!SemaRef.inTemplateInstantiation() || !SS.isEmpty())
return false;
for (DeclContext *DC = SemaRef.CurContext; DC; DC = DC->getParent()) {
if (DC->isTransparentContext())
continue;
SemaRef.LookupQualifiedName(R, DC);
if (!R.empty()) {
R.suppressDiagnostics();
if (isa<CXXRecordDecl>(DC)) {
// Don't diagnose names we find in classes; we get much better
// diagnostics for these from DiagnoseEmptyLookup.
R.clear();
if (DoDiagnoseEmptyLookup)
*DoDiagnoseEmptyLookup = true;
return false;
}
OverloadCandidateSet Candidates(FnLoc, CSK);
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
AddOverloadedCallCandidate(SemaRef, I.getPair(),
ExplicitTemplateArgs, Args,
Candidates, false, /*KnownValid*/ false);
OverloadCandidateSet::iterator Best;
if (Candidates.BestViableFunction(SemaRef, FnLoc, Best) != OR_Success) {
// No viable functions. Don't bother the user with notes for functions
// which don't work and shouldn't be found anyway.
R.clear();
return false;
}
// Find the namespaces where ADL would have looked, and suggest
// declaring the function there instead.
Sema::AssociatedNamespaceSet AssociatedNamespaces;
Sema::AssociatedClassSet AssociatedClasses;
SemaRef.FindAssociatedClassesAndNamespaces(FnLoc, Args,
AssociatedNamespaces,
AssociatedClasses);
Sema::AssociatedNamespaceSet SuggestedNamespaces;
if (canBeDeclaredInNamespace(R.getLookupName())) {
DeclContext *Std = SemaRef.getStdNamespace();
for (Sema::AssociatedNamespaceSet::iterator
it = AssociatedNamespaces.begin(),
end = AssociatedNamespaces.end(); it != end; ++it) {
// Never suggest declaring a function within namespace 'std'.
if (Std && Std->Encloses(*it))
continue;
// Never suggest declaring a function within a namespace with a
// reserved name, like __gnu_cxx.
NamespaceDecl *NS = dyn_cast<NamespaceDecl>(*it);
if (NS &&
NS->getQualifiedNameAsString().find("__") != std::string::npos)
continue;
SuggestedNamespaces.insert(*it);
}
}
SemaRef.Diag(R.getNameLoc(), diag::err_not_found_by_two_phase_lookup)
<< R.getLookupName();
if (SuggestedNamespaces.empty()) {
SemaRef.Diag(Best->Function->getLocation(),
diag::note_not_found_by_two_phase_lookup)
<< R.getLookupName() << 0;
} else if (SuggestedNamespaces.size() == 1) {
SemaRef.Diag(Best->Function->getLocation(),
diag::note_not_found_by_two_phase_lookup)
<< R.getLookupName() << 1 << *SuggestedNamespaces.begin();
} else {
// FIXME: It would be useful to list the associated namespaces here,
// but the diagnostics infrastructure doesn't provide a way to produce
// a localized representation of a list of items.
SemaRef.Diag(Best->Function->getLocation(),
diag::note_not_found_by_two_phase_lookup)
<< R.getLookupName() << 2;
}
// Try to recover by calling this function.
return true;
}
R.clear();
}
return false;
}
/// Attempt to recover from ill-formed use of a non-dependent operator in a
/// template, where the non-dependent operator was declared after the template
/// was defined.
///
/// Returns true if a viable candidate was found and a diagnostic was issued.
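///
/// For example (illustrative):
/// \code
///   namespace N { struct A {}; }
///   template <typename T> void g(T t) { t + t; }
///   N::A operator+(N::A, N::A); // declared after 'g', outside namespace N
///   void h() { g(N::A()); }     // 'operator+' is not found at instantiation
/// \endcode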
static bool
DiagnoseTwoPhaseOperatorLookup(Sema &SemaRef, OverloadedOperatorKind Op,
SourceLocation OpLoc,
ArrayRef<Expr *> Args) {
DeclarationName OpName =
SemaRef.Context.DeclarationNames.getCXXOperatorName(Op);
LookupResult R(SemaRef, OpName, OpLoc, Sema::LookupOperatorName);
return DiagnoseTwoPhaseLookup(SemaRef, OpLoc, CXXScopeSpec(), R,
OverloadCandidateSet::CSK_Operator,
/*ExplicitTemplateArgs=*/nullptr, Args);
}
namespace {
class BuildRecoveryCallExprRAII {
Sema &SemaRef;
public:
BuildRecoveryCallExprRAII(Sema &S) : SemaRef(S) {
assert(!SemaRef.IsBuildingRecoveryCallExpr);
SemaRef.IsBuildingRecoveryCallExpr = true;
}
~BuildRecoveryCallExprRAII() {
SemaRef.IsBuildingRecoveryCallExpr = false;
}
};
}
static std::unique_ptr<CorrectionCandidateCallback>
MakeValidator(Sema &SemaRef, MemberExpr *ME, size_t NumArgs,
bool HasTemplateArgs, bool AllowTypoCorrection) {
if (!AllowTypoCorrection)
return llvm::make_unique<NoTypoCorrectionCCC>();
return llvm::make_unique<FunctionCallFilterCCC>(SemaRef, NumArgs,
HasTemplateArgs, ME);
}
/// Attempts to recover from a call where no functions were found.
///
/// Returns the recovered call expression, or ExprError() if recovery failed.
static ExprResult
BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MutableArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool EmptyLookup, bool AllowTypoCorrection) {
// Do not try to recover if we are already building a recovery call.
// This stops infinite loops for template instantiations like
//
// template <typename T> auto foo(T t) -> decltype(foo(t)) {}
// template <typename T> auto foo(T t) -> decltype(foo(&t)) {}
//
if (SemaRef.IsBuildingRecoveryCallExpr)
return ExprError();
BuildRecoveryCallExprRAII RCE(SemaRef);
CXXScopeSpec SS;
SS.Adopt(ULE->getQualifierLoc());
SourceLocation TemplateKWLoc = ULE->getTemplateKeywordLoc();
TemplateArgumentListInfo TABuffer;
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr;
if (ULE->hasExplicitTemplateArgs()) {
ULE->copyTemplateArgumentsInto(TABuffer);
ExplicitTemplateArgs = &TABuffer;
}
LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
Sema::LookupOrdinaryName);
bool DoDiagnoseEmptyLookup = EmptyLookup;
if (!DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
OverloadCandidateSet::CSK_Normal,
ExplicitTemplateArgs, Args,
&DoDiagnoseEmptyLookup) &&
(!DoDiagnoseEmptyLookup || SemaRef.DiagnoseEmptyLookup(
S, SS, R,
MakeValidator(SemaRef, dyn_cast<MemberExpr>(Fn), Args.size(),
ExplicitTemplateArgs != nullptr, AllowTypoCorrection),
ExplicitTemplateArgs, Args)))
return ExprError();
assert(!R.empty() && "lookup results empty despite recovery");
// If recovery created an ambiguity, just bail out.
if (R.isAmbiguous()) {
R.suppressDiagnostics();
return ExprError();
}
// Build an implicit member call if appropriate. Just drop the
// casts and such from the call, we don't really care.
ExprResult NewFn = ExprError();
if ((*R.begin())->isCXXClassMember())
NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R,
ExplicitTemplateArgs, S);
else if (ExplicitTemplateArgs || TemplateKWLoc.isValid())
NewFn = SemaRef.BuildTemplateIdExpr(SS, TemplateKWLoc, R, false,
ExplicitTemplateArgs);
else
NewFn = SemaRef.BuildDeclarationNameExpr(SS, R, false);
if (NewFn.isInvalid())
return ExprError();
// This shouldn't cause an infinite loop because we're giving it
// an expression with viable lookup results, which should never
// end up here.
return SemaRef.ActOnCallExpr(/*Scope*/ nullptr, NewFn.get(), LParenLoc,
MultiExprArg(Args.data(), Args.size()),
RParenLoc);
}
/// Constructs and populates an OverloadCandidateSet from
/// the given function.
/// \returns true when the ExprResult output parameter has been set.
bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
MultiExprArg Args,
SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result) {
#ifndef NDEBUG
if (ULE->requiresADL()) {
// To do ADL, we must have found an unqualified name.
assert(!ULE->getQualifier() && "qualified name with ADL");
// We don't perform ADL for implicit declarations of builtins.
// Verify that this was correctly set up.
FunctionDecl *F;
if (ULE->decls_begin() + 1 == ULE->decls_end() &&
(F = dyn_cast<FunctionDecl>(*ULE->decls_begin())) &&
F->getBuiltinID() && F->isImplicit())
llvm_unreachable("performing ADL for builtin");
// We don't perform ADL in C.
assert(getLangOpts().CPlusPlus && "ADL enabled in C");
}
#endif
UnbridgedCastsSet UnbridgedCasts;
if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) {
*Result = ExprError();
return true;
}
// Add the functions denoted by the callee to the set of candidate
// functions, including those from argument-dependent lookup.
AddOverloadedCallCandidates(ULE, Args, *CandidateSet);
if (getLangOpts().MSVCCompat &&
CurContext->isDependentContext() && !isSFINAEContext() &&
(isa<FunctionDecl>(CurContext) || isa<CXXRecordDecl>(CurContext))) {
OverloadCandidateSet::iterator Best;
if (CandidateSet->empty() ||
CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best) ==
OR_No_Viable_Function) {
// In Microsoft mode, if we are inside a template class member function
// then create a type-dependent CallExpr. The goal is to postpone name
// lookup to instantiation time, to be able to search into type-dependent
// base classes.
CallExpr *CE = new (Context) CallExpr(
Context, Fn, Args, Context.DependentTy, VK_RValue, RParenLoc);
CE->setTypeDependent(true);
CE->setValueDependent(true);
CE->setInstantiationDependent(true);
*Result = CE;
return true;
}
}
if (CandidateSet->empty())
return false;
UnbridgedCasts.restore();
return false;
}
/// FinishOverloadedCallExpr - Given an OverloadCandidateSet, builds and
/// returns the completed call expression. If overload resolution fails,
/// emits diagnostics and returns ExprError().
static ExprResult FinishOverloadedCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
OverloadCandidateSet *CandidateSet,
OverloadCandidateSet::iterator *Best,
OverloadingResult OverloadResult,
bool AllowTypoCorrection) {
if (CandidateSet->empty())
return BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc, Args,
RParenLoc, /*EmptyLookup=*/true,
AllowTypoCorrection);
switch (OverloadResult) {
case OR_Success: {
FunctionDecl *FDecl = (*Best)->Function;
SemaRef.CheckUnresolvedLookupAccess(ULE, (*Best)->FoundDecl);
if (SemaRef.DiagnoseUseOfDecl(FDecl, ULE->getNameLoc()))
return ExprError();
Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
ExecConfig);
}
case OR_No_Viable_Function: {
// Try to recover by looking for viable functions which the user might
// have meant to call.
ExprResult Recovery = BuildRecoveryCallExpr(SemaRef, S, Fn, ULE, LParenLoc,
Args, RParenLoc,
/*EmptyLookup=*/false,
AllowTypoCorrection);
if (!Recovery.isInvalid())
return Recovery;
// If the user passes in a function that we can't take the address of, we
// generally end up emitting really bad error messages. Here, we attempt to
// emit better ones.
for (const Expr *Arg : Args) {
if (!Arg->getType()->isFunctionType())
continue;
if (auto *DRE = dyn_cast<DeclRefExpr>(Arg->IgnoreParenImpCasts())) {
auto *FD = dyn_cast<FunctionDecl>(DRE->getDecl());
if (FD &&
!SemaRef.checkAddressOfFunctionIsAvailable(FD, /*Complain=*/true,
Arg->getExprLoc()))
return ExprError();
}
}
SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_no_viable_function_in_call)
<< ULE->getName() << Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
break;
}
case OR_Ambiguous:
SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
<< ULE->getName() << Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_ViableCandidates, Args);
break;
case OR_Deleted: {
SemaRef.Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
<< (*Best)->Function->isDeleted()
<< ULE->getName()
<< SemaRef.getDeletedOrUnavailableSuffix((*Best)->Function)
<< Fn->getSourceRange();
CandidateSet->NoteCandidates(SemaRef, OCD_AllCandidates, Args);
// We emitted an error for the unavailable/deleted function call but keep
// the call in the AST.
FunctionDecl *FDecl = (*Best)->Function;
Fn = SemaRef.FixOverloadedFunctionReference(Fn, (*Best)->FoundDecl, FDecl);
return SemaRef.BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, RParenLoc,
ExecConfig);
}
}
// Overload resolution failed.
return ExprError();
}
static void markUnaddressableCandidatesUnviable(Sema &S,
OverloadCandidateSet &CS) {
for (auto I = CS.begin(), E = CS.end(); I != E; ++I) {
if (I->Viable &&
!S.checkAddressOfFunctionIsAvailable(I->Function, /*Complain=*/false)) {
I->Viable = false;
I->FailureKind = ovl_fail_addr_not_available;
}
}
}
/// BuildOverloadedCallExpr - Given the call expression that calls Fn
/// (which eventually refers to the declaration Func) and the call
/// arguments Args, attempt to resolve the function call down
/// to a specific function. If overload resolution succeeds, returns
/// the call expression produced by overload resolution.
/// Otherwise, emits diagnostics and returns ExprError.
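///
/// For example (illustrative):
/// \code
///   void f(int);
///   void f(double);
///   void g() { f(1); } // resolved here to f(int)
/// \endcode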
ExprResult Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection,
bool CalleesAddressIsTaken) {
OverloadCandidateSet CandidateSet(Fn->getExprLoc(),
OverloadCandidateSet::CSK_Normal);
ExprResult result;
if (buildOverloadedCallSet(S, Fn, ULE, Args, LParenLoc, &CandidateSet,
&result))
return result;
// If the user handed us something like `(&Foo)(Bar)`, we need to ensure that
// functions that aren't addressable are considered unviable.
if (CalleesAddressIsTaken)
markUnaddressableCandidatesUnviable(*this, CandidateSet);
OverloadCandidateSet::iterator Best;
OverloadingResult OverloadResult =
CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best);
return FinishOverloadedCallExpr(*this, S, Fn, ULE, LParenLoc, Args,
RParenLoc, ExecConfig, &CandidateSet,
&Best, OverloadResult,
AllowTypoCorrection);
}
static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
return Functions.size() > 1 ||
(Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
}
/// Create a unary operation that may resolve to an overloaded
/// operator.
///
/// \param OpLoc The location of the operator itself (e.g., '*').
///
/// \param Opc The UnaryOperatorKind that describes this operator.
///
/// \param Fns The set of non-member functions that will be
/// considered by overload resolution. The caller needs to build this
/// set based on the context using, e.g.,
/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
/// set should not contain any member functions; those will be added
/// by CreateOverloadedUnaryOp().
///
/// \param Input The input argument.
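///
/// For example (illustrative), for
/// \code
///   struct S { S operator++(int); };
///   void f(S s) { s++; }
/// \endcode
/// 's++' reaches here with Opc == UO_PostInc; an implicit '0' literal is
/// appended below so that the postfix form is selected.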
ExprResult
Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *Input, bool PerformADL) {
OverloadedOperatorKind Op = UnaryOperator::getOverloadedOperator(Opc);
assert(Op != OO_None && "Invalid opcode for overloaded unary operator");
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
// TODO: provide better source location info.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
if (checkPlaceholderForOverload(*this, Input))
return ExprError();
Expr *Args[2] = { Input, nullptr };
unsigned NumArgs = 1;
// For post-increment and post-decrement, add the implicit '0' as
// the second argument, so that we know this is a post-increment or
// post-decrement.
if (Opc == UO_PostInc || Opc == UO_PostDec) {
llvm::APSInt Zero(Context.getTypeSize(Context.IntTy), false);
Args[1] = IntegerLiteral::Create(Context, Zero, Context.IntTy,
SourceLocation());
NumArgs = 2;
}
ArrayRef<Expr *> ArgsArray(Args, NumArgs);
if (Input->isTypeDependent()) {
if (Fns.empty())
return new (Context) UnaryOperator(Input, Opc, Context.DependentTy,
VK_RValue, OK_Ordinary, OpLoc, false);
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
UnresolvedLookupExpr *Fn
= UnresolvedLookupExpr::Create(Context, NamingClass,
NestedNameSpecifierLoc(), OpNameInfo,
/*ADL*/ true, IsOverloaded(Fns),
Fns.begin(), Fns.end());
return new (Context)
CXXOperatorCallExpr(Context, Op, Fn, ArgsArray, Context.DependentTy,
VK_RValue, OpLoc, FPOptions());
}
// Build an empty overload set.
OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator);
// Add the candidates from the given function set.
AddFunctionCandidates(Fns, ArgsArray, CandidateSet);
// Add operator candidates that are member functions.
AddMemberOperatorCandidates(Op, OpLoc, ArgsArray, CandidateSet);
// Add candidates from ADL.
if (PerformADL) {
AddArgumentDependentLookupCandidates(OpName, OpLoc, ArgsArray,
/*ExplicitTemplateArgs*/nullptr,
CandidateSet);
}
// Add builtin operator candidates.
AddBuiltinOperatorCandidates(Op, OpLoc, ArgsArray, CandidateSet);
bool HadMultipleCandidates = (CandidateSet.size() > 1);
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
case OR_Success: {
// We found a built-in operator or an overloaded operator.
FunctionDecl *FnDecl = Best->Function;
if (FnDecl) {
Expr *Base = nullptr;
// We matched an overloaded operator. Build a call to that
// operator.
// Convert the arguments.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
CheckMemberOperatorAccess(OpLoc, Args[0], nullptr, Best->FoundDecl);
ExprResult InputRes =
PerformObjectArgumentInitialization(Input, /*Qualifier=*/nullptr,
Best->FoundDecl, Method);
if (InputRes.isInvalid())
return ExprError();
Base = Input = InputRes.get();
} else {
// Convert the arguments.
ExprResult InputInit
= PerformCopyInitialization(InitializedEntity::InitializeParameter(
Context,
FnDecl->getParamDecl(0)),
SourceLocation(),
Input);
if (InputInit.isInvalid())
return ExprError();
Input = InputInit.get();
}
// Build the actual expression node.
ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl, Best->FoundDecl,
Base, HadMultipleCandidates,
OpLoc);
if (FnExpr.isInvalid())
return ExprError();
// Determine the result type.
QualType ResultTy = FnDecl->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
Args[0] = Input;
CallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(), ArgsArray,
ResultTy, VK, OpLoc, FPOptions());
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall, FnDecl))
return ExprError();
if (CheckFunctionCall(FnDecl, TheCall,
FnDecl->getType()->castAs<FunctionProtoType>()))
return ExprError();
return MaybeBindToTemporary(TheCall);
} else {
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
// operator node.
ExprResult InputRes = PerformImplicitConversion(
Input, Best->BuiltinParamTypes[0], Best->Conversions[0], AA_Passing,
CCK_ForBuiltinOverloadedOp);
if (InputRes.isInvalid())
return ExprError();
Input = InputRes.get();
break;
}
}
case OR_No_Viable_Function:
// This is an erroneous use of an operator which can be overloaded by
// a non-member function. Check for non-member operators which were
// defined too late to be candidates.
if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, ArgsArray))
// FIXME: Recover by calling the found function.
return ExprError();
// No viable function; fall through to handling this as a
// built-in operator, which will produce an error message for us.
break;
case OR_Ambiguous:
Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
<< UnaryOperator::getOpcodeStr(Opc)
<< Input->getType()
<< Input->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, ArgsArray,
UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
case OR_Deleted:
Diag(OpLoc, diag::err_ovl_deleted_oper)
<< Best->Function->isDeleted()
<< UnaryOperator::getOpcodeStr(Opc)
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Input->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, ArgsArray,
UnaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
}
// Either we found no viable overloaded operator or we matched a
// built-in operator. In either case, fall through to trying to
// build a built-in operation.
return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
}
/// Create a binary operation that may resolve to an overloaded
/// operator.
///
/// \param OpLoc The location of the operator itself (e.g., '+').
///
/// \param Opc The BinaryOperatorKind that describes this operator.
///
/// \param Fns The set of non-member functions that will be
/// considered by overload resolution. The caller needs to build this
/// set based on the context using, e.g.,
/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
/// set should not contain any member functions; those will be added
/// by CreateOverloadedBinOp().
///
/// \param LHS Left-hand argument.
/// \param RHS Right-hand argument.
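///
/// For example (illustrative), for
/// \code
///   struct S {};
///   S operator+(S, S);
///   void f(S a, S b) { a + b; }
/// \endcode
/// 'a + b' is resolved here against the member, non-member, and built-in
/// candidates for 'operator+'.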
ExprResult
Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS, bool PerformADL) {
Expr *Args[2] = { LHS, RHS };
LHS = RHS = nullptr; // Use only Args from here on, not the LHS/RHS pair.
OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc);
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
// If either side is type-dependent, create an appropriate dependent
// expression.
if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
if (Fns.empty()) {
// If there are no functions to store, just build a dependent
// BinaryOperator or CompoundAssignment.
if (Opc <= BO_Assign || Opc > BO_OrAssign)
return new (Context) BinaryOperator(
Args[0], Args[1], Opc, Context.DependentTy, VK_RValue, OK_Ordinary,
OpLoc, FPFeatures);
return new (Context) CompoundAssignOperator(
Args[0], Args[1], Opc, Context.DependentTy, VK_LValue, OK_Ordinary,
Context.DependentTy, Context.DependentTy, OpLoc,
FPFeatures);
}
// FIXME: save results of ADL from here?
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
// TODO: provide better source location info in DNLoc component.
DeclarationNameInfo OpNameInfo(OpName, OpLoc);
UnresolvedLookupExpr *Fn
= UnresolvedLookupExpr::Create(Context, NamingClass,
NestedNameSpecifierLoc(), OpNameInfo,
/*ADL*/PerformADL, IsOverloaded(Fns),
Fns.begin(), Fns.end());
return new (Context)
CXXOperatorCallExpr(Context, Op, Fn, Args, Context.DependentTy,
VK_RValue, OpLoc, FPFeatures);
}
// Always do placeholder-like conversions on the RHS.
if (checkPlaceholderForOverload(*this, Args[1]))
return ExprError();
// Do placeholder-like conversion on the LHS; note that we should
// not get here with a PseudoObject LHS.
assert(Args[0]->getObjectKind() != OK_ObjCProperty);
if (checkPlaceholderForOverload(*this, Args[0]))
return ExprError();
// If this is the assignment operator, we only perform overload resolution
// if the left-hand side is a class or enumeration type. This is actually
// a hack. The standard requires that we do overload resolution between the
// various built-in candidates, but as DR507 points out, this can lead to
// problems. So we do it this way, which pretty much follows what GCC does.
// Note that we go the traditional code path for compound assignment forms.
if (Opc == BO_Assign && !Args[0]->getType()->isOverloadableType())
return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
// If this is the .* operator, which is not overloadable, just
// create a built-in binary operator.
if (Opc == BO_PtrMemD)
return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
// Build an empty overload set.
OverloadCandidateSet CandidateSet(OpLoc, OverloadCandidateSet::CSK_Operator);
// Add the candidates from the given function set.
AddFunctionCandidates(Fns, Args, CandidateSet);
// Add operator candidates that are member functions.
AddMemberOperatorCandidates(Op, OpLoc, Args, CandidateSet);
// Add candidates from ADL. Per [over.match.oper]p2, this lookup is not
// performed for an assignment operator (nor for operator[] nor operator->,
// which don't get here).
if (Opc != BO_Assign && PerformADL)
AddArgumentDependentLookupCandidates(OpName, OpLoc, Args,
/*ExplicitTemplateArgs*/ nullptr,
CandidateSet);
// Add builtin operator candidates.
AddBuiltinOperatorCandidates(Op, OpLoc, Args, CandidateSet);
bool HadMultipleCandidates = (CandidateSet.size() > 1);
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
case OR_Success: {
// We found a built-in operator or an overloaded operator.
FunctionDecl *FnDecl = Best->Function;
if (FnDecl) {
Expr *Base = nullptr;
// We matched an overloaded operator. Build a call to that
// operator.
// Convert the arguments.
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
// Best->Access is only meaningful for class members.
CheckMemberOperatorAccess(OpLoc, Args[0], Args[1], Best->FoundDecl);
ExprResult Arg1 =
PerformCopyInitialization(
InitializedEntity::InitializeParameter(Context,
FnDecl->getParamDecl(0)),
SourceLocation(), Args[1]);
if (Arg1.isInvalid())
return ExprError();
ExprResult Arg0 =
PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
Best->FoundDecl, Method);
if (Arg0.isInvalid())
return ExprError();
Base = Args[0] = Arg0.getAs<Expr>();
Args[1] = RHS = Arg1.getAs<Expr>();
} else {
// Convert the arguments.
ExprResult Arg0 = PerformCopyInitialization(
InitializedEntity::InitializeParameter(Context,
FnDecl->getParamDecl(0)),
SourceLocation(), Args[0]);
if (Arg0.isInvalid())
return ExprError();
ExprResult Arg1 =
PerformCopyInitialization(
InitializedEntity::InitializeParameter(Context,
FnDecl->getParamDecl(1)),
SourceLocation(), Args[1]);
if (Arg1.isInvalid())
return ExprError();
Args[0] = LHS = Arg0.getAs<Expr>();
Args[1] = RHS = Arg1.getAs<Expr>();
}
// Build the actual expression node.
ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
Best->FoundDecl, Base,
HadMultipleCandidates, OpLoc);
if (FnExpr.isInvalid())
return ExprError();
// Determine the result type.
QualType ResultTy = FnDecl->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.get(),
Args, ResultTy, VK, OpLoc,
FPFeatures);
if (CheckCallReturnType(FnDecl->getReturnType(), OpLoc, TheCall,
FnDecl))
return ExprError();
ArrayRef<const Expr *> ArgsArray(Args, 2);
const Expr *ImplicitThis = nullptr;
// Cut off the implicit 'this'.
if (isa<CXXMethodDecl>(FnDecl)) {
ImplicitThis = ArgsArray[0];
ArgsArray = ArgsArray.slice(1);
}
// Check for a self move.
if (Op == OO_Equal)
DiagnoseSelfMove(Args[0], Args[1], OpLoc);
checkCall(FnDecl, nullptr, ImplicitThis, ArgsArray,
isa<CXXMethodDecl>(FnDecl), OpLoc, TheCall->getSourceRange(),
VariadicDoesNotApply);
return MaybeBindToTemporary(TheCall);
} else {
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
// operator node.
ExprResult ArgsRes0 = PerformImplicitConversion(
Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0],
AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes0.isInvalid())
return ExprError();
Args[0] = ArgsRes0.get();
ExprResult ArgsRes1 = PerformImplicitConversion(
Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1],
AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes1.isInvalid())
return ExprError();
Args[1] = ArgsRes1.get();
break;
}
}
case OR_No_Viable_Function: {
// C++ [over.match.oper]p9:
// If the operator is the operator , [...] and there are no
// viable functions, then the operator is assumed to be the
// built-in operator and interpreted according to clause 5.
if (Opc == BO_Comma)
break;
// If the left operand is of class type and the operator is an assignment
// or compound assignment operator, do not fall through to the built-in
// handling; instead, report that no overloaded assignment operator was
// found.
ExprResult Result = ExprError();
if (Args[0]->getType()->isRecordType() &&
Opc >= BO_Assign && Opc <= BO_OrAssign) {
Diag(OpLoc, diag::err_ovl_no_viable_oper)
<< BinaryOperator::getOpcodeStr(Opc)
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
if (Args[0]->getType()->isIncompleteType()) {
Diag(OpLoc, diag::note_assign_lhs_incomplete)
<< Args[0]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
}
} else {
// This is an erroneous use of an operator which can be overloaded by
// a non-member function. Check for non-member operators which were
// defined too late to be candidates.
if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, Args))
// FIXME: Recover by calling the found function.
return ExprError();
// No viable function; try to create a built-in operation, which will
// produce an error. Then, show the non-viable candidates.
Result = CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
}
assert(Result.isInvalid() &&
"C++ binary operator overloading is missing candidates!");
if (Result.isInvalid())
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
return Result;
}
case OR_Ambiguous:
Diag(OpLoc, diag::err_ovl_ambiguous_oper_binary)
<< BinaryOperator::getOpcodeStr(Opc)
<< Args[0]->getType() << Args[1]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
case OR_Deleted:
if (isImplicitlyDeleted(Best->Function)) {
CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
Diag(OpLoc, diag::err_ovl_deleted_special_oper)
<< Context.getRecordType(Method->getParent())
<< getSpecialMember(Method);
// The user probably meant to call this special member. Just
// explain why it's deleted.
NoteDeletedFunction(Method);
return ExprError();
} else {
Diag(OpLoc, diag::err_ovl_deleted_oper)
<< Best->Function->isDeleted()
<< BinaryOperator::getOpcodeStr(Opc)
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
}
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
BinaryOperator::getOpcodeStr(Opc), OpLoc);
return ExprError();
}
// We matched a built-in operator; build it.
return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
}
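/// Create a subscript operation that may resolve to an overloaded
/// operator[].
///
/// For example (illustrative), 's[0]' below is built here:
/// \code
///   struct S { int &operator[](int); };
///   void f(S s) { s[0] = 1; }
/// \endcode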
ExprResult
Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base, Expr *Idx) {
Expr *Args[2] = { Base, Idx };
DeclarationName OpName =
Context.DeclarationNames.getCXXOperatorName(OO_Subscript);
// If either side is type-dependent, create an appropriate dependent
// expression.
if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
// CHECKME: no 'operator' keyword?
DeclarationNameInfo OpNameInfo(OpName, LLoc);
OpNameInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
UnresolvedLookupExpr *Fn
= UnresolvedLookupExpr::Create(Context, NamingClass,
NestedNameSpecifierLoc(), OpNameInfo,
/*ADL*/ true, /*Overloaded*/ false,
UnresolvedSetIterator(),
UnresolvedSetIterator());
// Can't add any actual overloads yet
return new (Context)
CXXOperatorCallExpr(Context, OO_Subscript, Fn, Args,
Context.DependentTy, VK_RValue, RLoc, FPOptions());
}
// Handle placeholders on both operands.
if (checkPlaceholderForOverload(*this, Args[0]))
return ExprError();
if (checkPlaceholderForOverload(*this, Args[1]))
return ExprError();
// Build an empty overload set.
OverloadCandidateSet CandidateSet(LLoc, OverloadCandidateSet::CSK_Operator);
// Subscript can only be overloaded as a member function.
// Add operator candidates that are member functions.
AddMemberOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
// Add builtin operator candidates.
AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
bool HadMultipleCandidates = (CandidateSet.size() > 1);
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, LLoc, Best)) {
case OR_Success: {
// We found a built-in operator or an overloaded operator.
FunctionDecl *FnDecl = Best->Function;
if (FnDecl) {
// We matched an overloaded operator. Build a call to that
// operator.
CheckMemberOperatorAccess(LLoc, Args[0], Args[1], Best->FoundDecl);
// Convert the arguments.
CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
ExprResult Arg0 =
PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
Best->FoundDecl, Method);
if (Arg0.isInvalid())
return ExprError();
Args[0] = Arg0.get();
// Convert the arguments.
ExprResult InputInit
= PerformCopyInitialization(InitializedEntity::InitializeParameter(
Context,
FnDecl->getParamDecl(0)),
SourceLocation(),
Args[1]);
if (InputInit.isInvalid())
return ExprError();
Args[1] = InputInit.getAs<Expr>();
// Build the actual expression node.
DeclarationNameInfo OpLocInfo(OpName, LLoc);
OpLocInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
Best->FoundDecl,
Base,
HadMultipleCandidates,
OpLocInfo.getLoc(),
OpLocInfo.getInfo());
if (FnExpr.isInvalid())
return ExprError();
// Determine the result type
QualType ResultTy = FnDecl->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, OO_Subscript,
FnExpr.get(), Args,
ResultTy, VK, RLoc,
FPOptions());
if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
return ExprError();
if (CheckFunctionCall(Method, TheCall,
Method->getType()->castAs<FunctionProtoType>()))
return ExprError();
return MaybeBindToTemporary(TheCall);
} else {
// We matched a built-in operator. Convert the arguments, then
// break out so that we will build the appropriate built-in
// operator node.
ExprResult ArgsRes0 = PerformImplicitConversion(
Args[0], Best->BuiltinParamTypes[0], Best->Conversions[0],
AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes0.isInvalid())
return ExprError();
Args[0] = ArgsRes0.get();
ExprResult ArgsRes1 = PerformImplicitConversion(
Args[1], Best->BuiltinParamTypes[1], Best->Conversions[1],
AA_Passing, CCK_ForBuiltinOverloadedOp);
if (ArgsRes1.isInvalid())
return ExprError();
Args[1] = ArgsRes1.get();
break;
}
}
case OR_No_Viable_Function: {
if (CandidateSet.empty())
Diag(LLoc, diag::err_ovl_no_oper)
<< Args[0]->getType() << /*subscript*/ 0
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
else
Diag(LLoc, diag::err_ovl_no_viable_subscript)
<< Args[0]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
"[]", LLoc);
return ExprError();
}
case OR_Ambiguous:
Diag(LLoc, diag::err_ovl_ambiguous_oper_binary)
<< "[]"
<< Args[0]->getType() << Args[1]->getType()
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
"[]", LLoc);
return ExprError();
case OR_Deleted:
Diag(LLoc, diag::err_ovl_deleted_oper)
<< Best->Function->isDeleted() << "[]"
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Args[0]->getSourceRange() << Args[1]->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
"[]", LLoc);
return ExprError();
}
// We matched a built-in operator; build it.
return CreateBuiltinArraySubscriptExpr(Args[0], LLoc, Args[1], RLoc);
}
/// BuildCallToMemberFunction - Build a call to a member
/// function. MemExprE is the expression that refers to the member
/// function (and includes the object parameter); Args are the
/// arguments to the function call (not including the object
/// parameter). The caller needs to validate that the member
/// expression refers to a non-static member function or an overloaded
/// member function.
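///
/// For example (illustrative), both calls below are built here:
/// \code
///   struct S {
///     void f(int);
///     void f(double);
///   };
///   void g(S s, void (S::*pmf)(int)) {
///     s.f(0);      // overloaded member function call
///     (s.*pmf)(0); // pointer-to-member function call
///   }
/// \endcode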
ExprResult
Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc) {
assert(MemExprE->getType() == Context.BoundMemberTy ||
MemExprE->getType() == Context.OverloadTy);
// Dig out the member expression. This holds both the object
// argument and the member function we're referring to.
Expr *NakedMemExpr = MemExprE->IgnoreParens();
// Determine whether this is a call to a pointer-to-member function.
if (BinaryOperator *op = dyn_cast<BinaryOperator>(NakedMemExpr)) {
assert(op->getType() == Context.BoundMemberTy);
assert(op->getOpcode() == BO_PtrMemD || op->getOpcode() == BO_PtrMemI);
QualType fnType =
op->getRHS()->getType()->castAs<MemberPointerType>()->getPointeeType();
const FunctionProtoType *proto = fnType->castAs<FunctionProtoType>();
QualType resultType = proto->getCallResultType(Context);
ExprValueKind valueKind = Expr::getValueKindForType(proto->getReturnType());
// Check that the object type isn't more qualified than the
// member function we're calling.
Qualifiers funcQuals = Qualifiers::fromCVRMask(proto->getTypeQuals());
QualType objectType = op->getLHS()->getType();
if (op->getOpcode() == BO_PtrMemI)
objectType = objectType->castAs<PointerType>()->getPointeeType();
Qualifiers objectQuals = objectType.getQualifiers();
Qualifiers difference = objectQuals - funcQuals;
difference.removeObjCGCAttr();
difference.removeAddressSpace();
if (difference) {
std::string qualsString = difference.getAsString();
Diag(LParenLoc, diag::err_pointer_to_member_call_drops_quals)
<< fnType.getUnqualifiedType()
<< qualsString
<< (qualsString.find(' ') == std::string::npos ? 1 : 2);
}
CXXMemberCallExpr *call
= new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
resultType, valueKind, RParenLoc);
if (CheckCallReturnType(proto->getReturnType(), op->getRHS()->getLocStart(),
call, nullptr))
return ExprError();
if (ConvertArgumentsForCall(call, op, nullptr, proto, Args, RParenLoc))
return ExprError();
if (CheckOtherCall(call, proto))
return ExprError();
return MaybeBindToTemporary(call);
}
if (isa<CXXPseudoDestructorExpr>(NakedMemExpr))
return new (Context)
CallExpr(Context, MemExprE, Args, Context.VoidTy, VK_RValue, RParenLoc);
UnbridgedCastsSet UnbridgedCasts;
if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts))
return ExprError();
MemberExpr *MemExpr;
CXXMethodDecl *Method = nullptr;
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_public);
NestedNameSpecifier *Qualifier = nullptr;
if (isa<MemberExpr>(NakedMemExpr)) {
MemExpr = cast<MemberExpr>(NakedMemExpr);
Method = cast<CXXMethodDecl>(MemExpr->getMemberDecl());
FoundDecl = MemExpr->getFoundDecl();
Qualifier = MemExpr->getQualifier();
UnbridgedCasts.restore();
} else {
UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr);
Qualifier = UnresExpr->getQualifier();
QualType ObjectType = UnresExpr->getBaseType();
Expr::Classification ObjectClassification
= UnresExpr->isArrow()? Expr::Classification::makeSimpleLValue()
: UnresExpr->getBase()->Classify(Context);
// Add overload candidates
OverloadCandidateSet CandidateSet(UnresExpr->getMemberLoc(),
OverloadCandidateSet::CSK_Normal);
// FIXME: avoid copy.
TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
if (UnresExpr->hasExplicitTemplateArgs()) {
UnresExpr->copyTemplateArgumentsInto(TemplateArgsBuffer);
TemplateArgs = &TemplateArgsBuffer;
}
for (UnresolvedMemberExpr::decls_iterator I = UnresExpr->decls_begin(),
E = UnresExpr->decls_end(); I != E; ++I) {
NamedDecl *Func = *I;
CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(Func->getDeclContext());
if (isa<UsingShadowDecl>(Func))
Func = cast<UsingShadowDecl>(Func)->getTargetDecl();
// Microsoft supports direct constructor calls.
if (getLangOpts().MicrosoftExt && isa<CXXConstructorDecl>(Func)) {
AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(),
Args, CandidateSet);
} else if ((Method = dyn_cast<CXXMethodDecl>(Func))) {
// If explicit template arguments were provided, we can't call a
// non-template member function.
if (TemplateArgs)
continue;
AddMethodCandidate(Method, I.getPair(), ActingDC, ObjectType,
ObjectClassification, Args, CandidateSet,
/*SuppressUserConversions=*/false);
} else {
AddMethodTemplateCandidate(
cast<FunctionTemplateDecl>(Func), I.getPair(), ActingDC,
TemplateArgs, ObjectType, ObjectClassification, Args, CandidateSet,
/*SuppressUserConversions=*/false);
}
}
DeclarationName DeclName = UnresExpr->getMemberName();
UnbridgedCasts.restore();
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, UnresExpr->getLocStart(),
Best)) {
case OR_Success:
Method = cast<CXXMethodDecl>(Best->Function);
FoundDecl = Best->FoundDecl;
CheckUnresolvedMemberAccess(UnresExpr, Best->FoundDecl);
if (DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc()))
return ExprError();
// If FoundDecl is different from Method (such as if one is a template
// and the other a specialization), make sure DiagnoseUseOfDecl is
// called on both.
// FIXME: This would be more comprehensively addressed by modifying
// DiagnoseUseOfDecl to accept both the FoundDecl and the decl
// being used.
if (Method != FoundDecl.getDecl() &&
DiagnoseUseOfDecl(Method, UnresExpr->getNameLoc()))
return ExprError();
break;
case OR_No_Viable_Function:
Diag(UnresExpr->getMemberLoc(),
diag::err_ovl_no_viable_member_function_in_call)
<< DeclName << MemExprE->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
// FIXME: Leaking incoming expressions!
return ExprError();
case OR_Ambiguous:
Diag(UnresExpr->getMemberLoc(), diag::err_ovl_ambiguous_member_call)
<< DeclName << MemExprE->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
// FIXME: Leaking incoming expressions!
return ExprError();
case OR_Deleted:
Diag(UnresExpr->getMemberLoc(), diag::err_ovl_deleted_member_call)
<< Best->Function->isDeleted()
<< DeclName
<< getDeletedOrUnavailableSuffix(Best->Function)
<< MemExprE->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
// FIXME: Leaking incoming expressions!
return ExprError();
}
MemExprE = FixOverloadedFunctionReference(MemExprE, FoundDecl, Method);
// If overload resolution picked a static member, build a
// non-member call based on that function.
if (Method->isStatic()) {
return BuildResolvedCallExpr(MemExprE, Method, LParenLoc, Args,
RParenLoc);
}
MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
}
assert(Method && "Member call to something that isn't a method?");
QualType ResultType = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultType);
ResultType = ResultType.getNonLValueExprType(Context);
CXXMemberCallExpr *TheCall =
new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
ResultType, VK, RParenLoc);
// Check for a valid return type.
if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
TheCall, Method))
return ExprError();
// Convert the object argument (for a non-static member function call).
// We only need to do this if there was actually an overload; otherwise
// it was done at lookup.
if (!Method->isStatic()) {
ExprResult ObjectArg =
PerformObjectArgumentInitialization(MemExpr->getBase(), Qualifier,
FoundDecl, Method);
if (ObjectArg.isInvalid())
return ExprError();
MemExpr->setBase(ObjectArg.get());
}
// Convert the rest of the arguments
const FunctionProtoType *Proto =
Method->getType()->getAs<FunctionProtoType>();
if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args,
RParenLoc))
return ExprError();
DiagnoseSentinelCalls(Method, LParenLoc, Args);
if (CheckFunctionCall(Method, TheCall, Proto))
return ExprError();
// In the case where the method to call was not selected by the overload
// resolution process, we still need to handle the enable_if attribute. Do
// that here, so it will not hide previous -- and more relevant -- errors.
if (auto *MemE = dyn_cast<MemberExpr>(NakedMemExpr)) {
if (const EnableIfAttr *Attr = CheckEnableIf(Method, Args, true)) {
Diag(MemE->getMemberLoc(),
diag::err_ovl_no_viable_member_function_in_call)
<< Method << Method->getSourceRange();
Diag(Method->getLocation(),
diag::note_ovl_candidate_disabled_by_function_cond_attr)
<< Attr->getCond()->getSourceRange() << Attr->getMessage();
return ExprError();
}
}
if ((isa<CXXConstructorDecl>(CurContext) ||
isa<CXXDestructorDecl>(CurContext)) &&
TheCall->getMethodDecl()->isPure()) {
const CXXMethodDecl *MD = TheCall->getMethodDecl();
if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts()) &&
MemExpr->performsVirtualDispatch(getLangOpts())) {
Diag(MemExpr->getLocStart(),
diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor)
<< MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
<< MD->getParent()->getDeclName();
Diag(MD->getLocStart(), diag::note_previous_decl) << MD->getDeclName();
if (getLangOpts().AppleKext)
Diag(MemExpr->getLocStart(),
diag::note_pure_qualified_call_kext)
<< MD->getParent()->getDeclName()
<< MD->getDeclName();
}
}
if (CXXDestructorDecl *DD =
dyn_cast<CXXDestructorDecl>(TheCall->getMethodDecl())) {
// a->A::f() doesn't go through the vtable, except in AppleKext mode.
bool CallCanBeVirtual = !MemExpr->hasQualifier() || getLangOpts().AppleKext;
CheckVirtualDtorCall(DD, MemExpr->getLocStart(), /*IsDelete=*/false,
CallCanBeVirtual, /*WarnOnNonAbstractTypes=*/true,
MemExpr->getMemberLoc());
}
return MaybeBindToTemporary(TheCall);
}
/// BuildCallToObjectOfClassType - Build a call to an object of class
/// type (C++ [over.call.object]), which can end up invoking an
/// overloaded function call operator (@c operator()) or performing a
/// user-defined conversion on the object argument.
ExprResult
Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc) {
if (checkPlaceholderForOverload(*this, Obj))
return ExprError();
ExprResult Object = Obj;
UnbridgedCastsSet UnbridgedCasts;
if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts))
return ExprError();
assert(Object.get()->getType()->isRecordType() &&
"Requires object type argument");
const RecordType *Record = Object.get()->getType()->getAs<RecordType>();
// C++ [over.call.object]p1:
// If the primary-expression E in the function call syntax
// evaluates to a class object of type "cv T", then the set of
// candidate functions includes at least the function call
// operators of T. The function call operators of T are obtained by
// ordinary lookup of the name operator() in the context of
// (E).operator().
OverloadCandidateSet CandidateSet(LParenLoc,
OverloadCandidateSet::CSK_Operator);
DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Call);
if (RequireCompleteType(LParenLoc, Object.get()->getType(),
diag::err_incomplete_object_call, Object.get()))
return ExprError();
LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
LookupQualifiedName(R, Record->getDecl());
R.suppressDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
Oper != OperEnd; ++Oper) {
AddMethodCandidate(Oper.getPair(), Object.get()->getType(),
Object.get()->Classify(Context), Args, CandidateSet,
/*SuppressUserConversions=*/false);
}
// C++ [over.call.object]p2:
// In addition, for each (non-explicit in C++0x) conversion function
// declared in T of the form
//
// operator conversion-type-id () cv-qualifier;
//
// where cv-qualifier is the same cv-qualification as, or a
// greater cv-qualification than, cv, and where conversion-type-id
// denotes the type "pointer to function of (P1,...,Pn) returning
// R", or the type "reference to pointer to function of
// (P1,...,Pn) returning R", or the type "reference to function
// of (P1,...,Pn) returning R", a surrogate call function [...]
// is also considered as a candidate function. Similarly,
// surrogate call functions are added to the set of candidate
// functions for each conversion function declared in an
// accessible base class provided the function is not hidden
// within T by another intervening declaration.
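//
// For example (illustrative), 's(0)' below is callable through a
// surrogate:
//
//   using FP = void (*)(int);
//   struct S { operator FP(); };
//   void f(S s) { s(0); } // converts 's' to FP, then calls the result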
const auto &Conversions =
cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions();
for (auto I = Conversions.begin(), E = Conversions.end(); I != E; ++I) {
NamedDecl *D = *I;
CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
if (isa<UsingShadowDecl>(D))
D = cast<UsingShadowDecl>(D)->getTargetDecl();
// Skip over templated conversion functions; they aren't
// surrogates.
if (isa<FunctionTemplateDecl>(D))
continue;
CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
if (!Conv->isExplicit()) {
// Strip the reference type (if any) and then the pointer type (if
// any) to get down to what might be a function type.
QualType ConvType = Conv->getConversionType().getNonReferenceType();
if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
ConvType = ConvPtrType->getPointeeType();
if (const FunctionProtoType *Proto = ConvType->getAs<FunctionProtoType>())
{
AddSurrogateCandidate(Conv, I.getPair(), ActingContext, Proto,
Object.get(), Args, CandidateSet);
}
}
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, Object.get()->getLocStart(),
Best)) {
case OR_Success:
// Overload resolution succeeded; we'll build the appropriate call
// below.
break;
case OR_No_Viable_Function:
if (CandidateSet.empty())
Diag(Object.get()->getLocStart(), diag::err_ovl_no_oper)
<< Object.get()->getType() << /*call*/ 1
<< Object.get()->getSourceRange();
else
Diag(Object.get()->getLocStart(),
diag::err_ovl_no_viable_object_call)
<< Object.get()->getType() << Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
break;
case OR_Ambiguous:
Diag(Object.get()->getLocStart(),
diag::err_ovl_ambiguous_object_call)
<< Object.get()->getType() << Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
break;
case OR_Deleted:
Diag(Object.get()->getLocStart(),
diag::err_ovl_deleted_object_call)
<< Best->Function->isDeleted()
<< Object.get()->getType()
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Object.get()->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
break;
}
if (Best == CandidateSet.end())
return true;
UnbridgedCasts.restore();
if (Best->Function == nullptr) {
// Since there is no function declaration, this is one of the
// surrogate candidates. Dig out the conversion function.
CXXConversionDecl *Conv
= cast<CXXConversionDecl>(
Best->Conversions[0].UserDefined.ConversionFunction);
CheckMemberOperatorAccess(LParenLoc, Object.get(), nullptr,
Best->FoundDecl);
if (DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc))
return ExprError();
assert(Conv == Best->FoundDecl.getDecl() &&
"Found Decl & conversion-to-functionptr should be same, right?!");
// We selected one of the surrogate functions that converts the
// object parameter to a function pointer. Perform the conversion
// on the object argument, then let ActOnCallExpr finish the job.
// Create an implicit member expr to refer to the conversion operator,
// and then call it.
ExprResult Call = BuildCXXMemberCallExpr(Object.get(), Best->FoundDecl,
Conv, HadMultipleCandidates);
if (Call.isInvalid())
return ExprError();
// Record usage of conversion in an implicit cast.
Call = ImplicitCastExpr::Create(Context, Call.get()->getType(),
CK_UserDefinedConversion, Call.get(),
nullptr, VK_RValue);
return ActOnCallExpr(S, Call.get(), LParenLoc, Args, RParenLoc);
}
CheckMemberOperatorAccess(LParenLoc, Object.get(), nullptr, Best->FoundDecl);
// We found an overloaded operator(). Build a CXXOperatorCallExpr
// that calls this method, using Object for the implicit object
// parameter and passing along the remaining arguments.
CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
// An error diagnostic has already been printed when parsing the declaration.
if (Method->isInvalidDecl())
return ExprError();
const FunctionProtoType *Proto =
Method->getType()->getAs<FunctionProtoType>();
unsigned NumParams = Proto->getNumParams();
DeclarationNameInfo OpLocInfo(
Context.DeclarationNames.getCXXOperatorName(OO_Call), LParenLoc);
OpLocInfo.setCXXOperatorNameRange(SourceRange(LParenLoc, RParenLoc));
ExprResult NewFn = CreateFunctionRefExpr(*this, Method, Best->FoundDecl,
Obj, HadMultipleCandidates,
OpLocInfo.getLoc(),
OpLocInfo.getInfo());
if (NewFn.isInvalid())
return true;
// Build the full argument list for the method call (the implicit object
// parameter is placed at the beginning of the list).
SmallVector<Expr *, 8> MethodArgs(Args.size() + 1);
MethodArgs[0] = Object.get();
std::copy(Args.begin(), Args.end(), MethodArgs.begin() + 1);
// Once we've built TheCall, all of the expressions are properly
// owned.
QualType ResultTy = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall = new (Context)
CXXOperatorCallExpr(Context, OO_Call, NewFn.get(), MethodArgs, ResultTy,
VK, RParenLoc, FPOptions());
if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
return true;
// We may have default arguments. If so, we need to allocate more
// slots in the call for them.
if (Args.size() < NumParams)
TheCall->setNumArgs(Context, NumParams + 1);
bool IsError = false;
// Initialize the implicit object parameter.
ExprResult ObjRes =
PerformObjectArgumentInitialization(Object.get(), /*Qualifier=*/nullptr,
Best->FoundDecl, Method);
if (ObjRes.isInvalid())
IsError = true;
else
Object = ObjRes;
TheCall->setArg(0, Object.get());
// Check the argument types.
for (unsigned i = 0; i != NumParams; i++) {
Expr *Arg;
if (i < Args.size()) {
Arg = Args[i];
// Pass the argument.
ExprResult InputInit
= PerformCopyInitialization(InitializedEntity::InitializeParameter(
Context,
Method->getParamDecl(i)),
SourceLocation(), Arg);
IsError |= InputInit.isInvalid();
Arg = InputInit.getAs<Expr>();
} else {
ExprResult DefArg
= BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i));
if (DefArg.isInvalid()) {
IsError = true;
break;
}
Arg = DefArg.getAs<Expr>();
}
TheCall->setArg(i + 1, Arg);
}
// If this is a variadic call, handle args passed through "...".
if (Proto->isVariadic()) {
// Promote the arguments (C99 6.5.2.2p7).
for (unsigned i = NumParams, e = Args.size(); i < e; i++) {
ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
nullptr);
IsError |= Arg.isInvalid();
TheCall->setArg(i + 1, Arg.get());
}
}
if (IsError) return true;
DiagnoseSentinelCalls(Method, LParenLoc, Args);
if (CheckFunctionCall(Method, TheCall, Proto))
return true;
return MaybeBindToTemporary(TheCall);
}
/// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator->
/// (if one exists), where @c Base is an expression of class type and
/// @c Member is the name of the member we're trying to find.
ExprResult
Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc,
bool *NoArrowOperatorFound) {
assert(Base->getType()->isRecordType() &&
"left-hand side must have class type");
if (checkPlaceholderForOverload(*this, Base))
return ExprError();
SourceLocation Loc = Base->getExprLoc();
// C++ [over.ref]p1:
//
// [...] An expression x->m is interpreted as (x.operator->())->m
// for a class object x of type T if T::operator->() exists and if
// the operator is selected as the best match function by the
// overload resolution mechanism (13.3).
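// A minimal sketch (the types Ptr and Widget are hypothetical):
//
//   struct Widget { void draw(); };
//   struct Ptr {
//     Widget *p;
//     Widget *operator->() const { return p; }
//   };
//
// Given Ptr x, the expression x->draw() is interpreted as
// (x.operator->())->draw().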
DeclarationName OpName =
Context.DeclarationNames.getCXXOperatorName(OO_Arrow);
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Operator);
const RecordType *BaseRecord = Base->getType()->getAs<RecordType>();
if (RequireCompleteType(Loc, Base->getType(),
diag::err_typecheck_incomplete_tag, Base))
return ExprError();
LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName);
LookupQualifiedName(R, BaseRecord->getDecl());
R.suppressDiagnostics();
for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
Oper != OperEnd; ++Oper) {
AddMethodCandidate(Oper.getPair(), Base->getType(), Base->Classify(Context),
None, CandidateSet, /*SuppressUserConversions=*/false);
}
bool HadMultipleCandidates = (CandidateSet.size() > 1);
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
case OR_Success:
// Overload resolution succeeded; we'll build the call below.
break;
case OR_No_Viable_Function:
if (CandidateSet.empty()) {
QualType BaseType = Base->getType();
if (NoArrowOperatorFound) {
// Report this specific error to the caller instead of emitting a
// diagnostic, as requested.
*NoArrowOperatorFound = true;
return ExprError();
}
Diag(OpLoc, diag::err_typecheck_member_reference_arrow)
<< BaseType << Base->getSourceRange();
if (BaseType->isRecordType() && !BaseType->isPointerType()) {
Diag(OpLoc, diag::note_typecheck_member_reference_suggestion)
<< FixItHint::CreateReplacement(OpLoc, ".");
}
} else
Diag(OpLoc, diag::err_ovl_no_viable_oper)
<< "operator->" << Base->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
return ExprError();
case OR_Ambiguous:
Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
<< "->" << Base->getType() << Base->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Base);
return ExprError();
case OR_Deleted:
Diag(OpLoc, diag::err_ovl_deleted_oper)
<< Best->Function->isDeleted()
<< "->"
<< getDeletedOrUnavailableSuffix(Best->Function)
<< Base->getSourceRange();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
return ExprError();
}
CheckMemberOperatorAccess(OpLoc, Base, nullptr, Best->FoundDecl);
// Convert the object parameter.
CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
ExprResult BaseResult =
PerformObjectArgumentInitialization(Base, /*Qualifier=*/nullptr,
Best->FoundDecl, Method);
if (BaseResult.isInvalid())
return ExprError();
Base = BaseResult.get();
// Build the operator call.
ExprResult FnExpr = CreateFunctionRefExpr(*this, Method, Best->FoundDecl,
Base, HadMultipleCandidates, OpLoc);
if (FnExpr.isInvalid())
return ExprError();
QualType ResultTy = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall =
new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.get(),
Base, ResultTy, VK, OpLoc, FPOptions());
if (CheckCallReturnType(Method->getReturnType(), OpLoc, TheCall, Method))
return ExprError();
if (CheckFunctionCall(Method, TheCall,
Method->getType()->castAs<FunctionProtoType>()))
return ExprError();
return MaybeBindToTemporary(TheCall);
}
/// BuildLiteralOperatorCall - Build a UserDefinedLiteral by creating a call to
/// a literal operator described by the provided lookup results.
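///
/// For example (a sketch; the suffix _km is hypothetical), given
/// \code
///   constexpr long double operator""_km(long double d) { return d; }
/// \endcode
/// the literal 2.5_km is built as a call to operator""_km(2.5L).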
ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr*> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *TemplateArgs) {
SourceLocation UDSuffixLoc = SuffixInfo.getCXXLiteralOperatorNameLoc();
OverloadCandidateSet CandidateSet(UDSuffixLoc,
OverloadCandidateSet::CSK_Normal);
AddFunctionCandidates(R.asUnresolvedSet(), Args, CandidateSet, TemplateArgs,
/*SuppressUserConversions=*/true);
bool HadMultipleCandidates = (CandidateSet.size() > 1);
// Perform overload resolution. This will usually be trivial, but might need
// to perform substitutions for a literal operator template.
OverloadCandidateSet::iterator Best;
switch (CandidateSet.BestViableFunction(*this, UDSuffixLoc, Best)) {
case OR_Success:
case OR_Deleted:
break;
case OR_No_Viable_Function:
Diag(UDSuffixLoc, diag::err_ovl_no_viable_function_in_call)
<< R.getLookupName();
CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
return ExprError();
case OR_Ambiguous:
Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
return ExprError();
}
FunctionDecl *FD = Best->Function;
ExprResult Fn = CreateFunctionRefExpr(*this, FD, Best->FoundDecl,
nullptr, HadMultipleCandidates,
SuffixInfo.getLoc(),
SuffixInfo.getInfo());
if (Fn.isInvalid())
return true;
// Check the argument types. This should almost always be a no-op, except
// that array-to-pointer decay is applied to string literals.
Expr *ConvArgs[2];
for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
ExprResult InputInit = PerformCopyInitialization(
InitializedEntity::InitializeParameter(Context, FD->getParamDecl(ArgIdx)),
SourceLocation(), Args[ArgIdx]);
if (InputInit.isInvalid())
return true;
ConvArgs[ArgIdx] = InputInit.get();
}
QualType ResultTy = FD->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultTy);
ResultTy = ResultTy.getNonLValueExprType(Context);
UserDefinedLiteral *UDL =
new (Context) UserDefinedLiteral(Context, Fn.get(),
llvm::makeArrayRef(ConvArgs, Args.size()),
ResultTy, VK, LitEndLoc, UDSuffixLoc);
if (CheckCallReturnType(FD->getReturnType(), UDSuffixLoc, UDL, FD))
return ExprError();
if (CheckFunctionCall(FD, UDL, nullptr))
return ExprError();
return MaybeBindToTemporary(UDL);
}
/// Build a call to 'begin' or 'end' for a C++11 for-range statement. If the
/// given LookupResult is non-empty, it is assumed to describe a member which
/// will be invoked. Otherwise, the function will be found via argument
/// dependent lookup.
/// CallExpr is set to a valid expression and FRS_Success returned on success,
/// otherwise CallExpr is set to ExprError() and some non-success value
/// is returned.
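/// A sketch of the two cases (the type IntBox is hypothetical):
/// \code
///   struct IntBox { int *begin(); int *end(); };
///   void g(IntBox b) { for (int x : b) (void)x; } // members found and used
/// \endcode
/// For a class type with no members named 'begin' or 'end', the free
/// functions begin(range)/end(range) are found by argument-dependent
/// lookup instead.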
Sema::ForRangeStatus
Sema::BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr) {
Scope *S = nullptr;
CandidateSet->clear(OverloadCandidateSet::CSK_Normal);
if (!MemberLookup.empty()) {
ExprResult MemberRef =
BuildMemberReferenceExpr(Range, Range->getType(), Loc,
/*IsPtr=*/false, CXXScopeSpec(),
/*TemplateKWLoc=*/SourceLocation(),
/*FirstQualifierInScope=*/nullptr,
MemberLookup,
/*TemplateArgs=*/nullptr, S);
if (MemberRef.isInvalid()) {
*CallExpr = ExprError();
return FRS_DiagnosticIssued;
}
*CallExpr = ActOnCallExpr(S, MemberRef.get(), Loc, None, Loc, nullptr);
if (CallExpr->isInvalid()) {
*CallExpr = ExprError();
return FRS_DiagnosticIssued;
}
} else {
UnresolvedSet<0> FoundNames;
UnresolvedLookupExpr *Fn =
UnresolvedLookupExpr::Create(Context, /*NamingClass=*/nullptr,
NestedNameSpecifierLoc(), NameInfo,
/*NeedsADL=*/true, /*Overloaded=*/false,
FoundNames.begin(), FoundNames.end());
bool CandidateSetError = buildOverloadedCallSet(S, Fn, Fn, Range, Loc,
CandidateSet, CallExpr);
if (CandidateSet->empty() || CandidateSetError) {
*CallExpr = ExprError();
return FRS_NoViableFunction;
}
OverloadCandidateSet::iterator Best;
OverloadingResult OverloadResult =
CandidateSet->BestViableFunction(*this, Fn->getLocStart(), Best);
if (OverloadResult == OR_No_Viable_Function) {
*CallExpr = ExprError();
return FRS_NoViableFunction;
}
*CallExpr = FinishOverloadedCallExpr(*this, S, Fn, Fn, Loc, Range,
Loc, nullptr, CandidateSet, &Best,
OverloadResult,
/*AllowTypoCorrection=*/false);
if (CallExpr->isInvalid() || OverloadResult != OR_Success) {
*CallExpr = ExprError();
return FRS_DiagnosticIssued;
}
}
return FRS_Success;
}
/// FixOverloadedFunctionReference - E is an expression that refers to
/// a C++ overloaded function (possibly with some parentheses and
/// perhaps a '&' around it). We have resolved the overloaded function
/// to the function declaration Fn, so patch up the expression E to
/// refer (possibly indirectly) to Fn. Returns the new expr.
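/// For example (a sketch with hypothetical overloads):
/// \code
///   void f(int);
///   void f(double);
///   void (*fp)(int) = &f; // overload resolved by the target type
/// \endcode
/// Here &f resolves to f(int), and the '&' subexpression is patched up to
/// refer to that declaration.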
Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
FunctionDecl *Fn) {
if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
Expr *SubExpr = FixOverloadedFunctionReference(PE->getSubExpr(),
Found, Fn);
if (SubExpr == PE->getSubExpr())
return PE;
return new (Context) ParenExpr(PE->getLParen(), PE->getRParen(), SubExpr);
}
if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
Expr *SubExpr = FixOverloadedFunctionReference(ICE->getSubExpr(),
Found, Fn);
assert(Context.hasSameType(ICE->getSubExpr()->getType(),
SubExpr->getType()) &&
"Implicit cast type cannot be determined from overload");
assert(ICE->path_empty() && "fixing up hierarchy conversion?");
if (SubExpr == ICE->getSubExpr())
return ICE;
return ImplicitCastExpr::Create(Context, ICE->getType(),
ICE->getCastKind(),
SubExpr, nullptr,
ICE->getValueKind());
}
if (auto *GSE = dyn_cast<GenericSelectionExpr>(E)) {
if (!GSE->isResultDependent()) {
Expr *SubExpr =
FixOverloadedFunctionReference(GSE->getResultExpr(), Found, Fn);
if (SubExpr == GSE->getResultExpr())
return GSE;
// Replace the resulting type information before rebuilding the generic
// selection expression.
ArrayRef<Expr *> A = GSE->getAssocExprs();
SmallVector<Expr *, 4> AssocExprs(A.begin(), A.end());
unsigned ResultIdx = GSE->getResultIndex();
AssocExprs[ResultIdx] = SubExpr;
return new (Context) GenericSelectionExpr(
Context, GSE->getGenericLoc(), GSE->getControllingExpr(),
GSE->getAssocTypeSourceInfos(), AssocExprs, GSE->getDefaultLoc(),
GSE->getRParenLoc(), GSE->containsUnexpandedParameterPack(),
ResultIdx);
}
// Rather than fall through to the unreachable, return the original generic
// selection expression.
return GSE;
}
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) {
assert(UnOp->getOpcode() == UO_AddrOf &&
"Can only take the address of an overloaded function");
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
if (Method->isStatic()) {
// Do nothing: static member functions aren't any different
// from non-member functions.
} else {
// Fix the subexpression, which really has to be an
// UnresolvedLookupExpr holding an overloaded member function
// or template.
Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
Found, Fn);
if (SubExpr == UnOp->getSubExpr())
return UnOp;
assert(isa<DeclRefExpr>(SubExpr)
&& "fixed to something other than a decl ref");
assert(cast<DeclRefExpr>(SubExpr)->getQualifier()
&& "fixed to a member ref with no nested name qualifier");
// We have taken the address of a pointer to member
// function. Perform the computation here so that we get the
// appropriate pointer to member type.
QualType ClassType
= Context.getTypeDeclType(cast<RecordDecl>(Method->getDeclContext()));
QualType MemPtrType
= Context.getMemberPointerType(Fn->getType(), ClassType.getTypePtr());
// Under the MS ABI, lock down the inheritance model now.
if (Context.getTargetInfo().getCXXABI().isMicrosoft())
(void)isCompleteType(UnOp->getOperatorLoc(), MemPtrType);
return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType,
VK_RValue, OK_Ordinary,
UnOp->getOperatorLoc(), false);
}
}
Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
Found, Fn);
if (SubExpr == UnOp->getSubExpr())
return UnOp;
return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
Context.getPointerType(SubExpr->getType()),
VK_RValue, OK_Ordinary,
UnOp->getOperatorLoc(), false);
}
// C++ [except.spec]p17:
// An exception-specification is considered to be needed when:
// - in an expression the function is the unique lookup result or the
// selected member of a set of overloaded functions
if (auto *FPT = Fn->getType()->getAs<FunctionProtoType>())
ResolveExceptionSpec(E->getExprLoc(), FPT);
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
// FIXME: avoid copy.
TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
if (ULE->hasExplicitTemplateArgs()) {
ULE->copyTemplateArgumentsInto(TemplateArgsBuffer);
TemplateArgs = &TemplateArgsBuffer;
}
DeclRefExpr *DRE = DeclRefExpr::Create(Context,
ULE->getQualifierLoc(),
ULE->getTemplateKeywordLoc(),
Fn,
/*enclosing*/ false, // FIXME?
ULE->getNameLoc(),
Fn->getType(),
VK_LValue,
Found.getDecl(),
TemplateArgs);
MarkDeclRefReferenced(DRE);
DRE->setHadMultipleCandidates(ULE->getNumDecls() > 1);
return DRE;
}
if (UnresolvedMemberExpr *MemExpr = dyn_cast<UnresolvedMemberExpr>(E)) {
// FIXME: avoid copy.
TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
if (MemExpr->hasExplicitTemplateArgs()) {
MemExpr->copyTemplateArgumentsInto(TemplateArgsBuffer);
TemplateArgs = &TemplateArgsBuffer;
}
Expr *Base;
// If we're filling in a static method where we used to have an
// implicit member access, rewrite to a simple decl ref.
if (MemExpr->isImplicitAccess()) {
if (cast<CXXMethodDecl>(Fn)->isStatic()) {
DeclRefExpr *DRE = DeclRefExpr::Create(Context,
MemExpr->getQualifierLoc(),
MemExpr->getTemplateKeywordLoc(),
Fn,
/*enclosing*/ false,
MemExpr->getMemberLoc(),
Fn->getType(),
VK_LValue,
Found.getDecl(),
TemplateArgs);
MarkDeclRefReferenced(DRE);
DRE->setHadMultipleCandidates(MemExpr->getNumDecls() > 1);
return DRE;
} else {
SourceLocation Loc = MemExpr->getMemberLoc();
if (MemExpr->getQualifier())
Loc = MemExpr->getQualifierLoc().getBeginLoc();
CheckCXXThisCapture(Loc);
Base = new (Context) CXXThisExpr(Loc,
MemExpr->getBaseType(),
/*isImplicit=*/true);
}
} else
Base = MemExpr->getBase();
ExprValueKind valueKind;
QualType type;
if (cast<CXXMethodDecl>(Fn)->isStatic()) {
valueKind = VK_LValue;
type = Fn->getType();
} else {
valueKind = VK_RValue;
type = Context.BoundMemberTy;
}
MemberExpr *ME = MemberExpr::Create(
Context, Base, MemExpr->isArrow(), MemExpr->getOperatorLoc(),
MemExpr->getQualifierLoc(), MemExpr->getTemplateKeywordLoc(), Fn, Found,
MemExpr->getMemberNameInfo(), TemplateArgs, type, valueKind,
OK_Ordinary);
ME->setHadMultipleCandidates(true);
MarkMemberReferenced(ME);
return ME;
}
llvm_unreachable("Invalid reference to overloaded function");
}
ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
DeclAccessPair Found,
FunctionDecl *Fn) {
return FixOverloadedFunctionReference(E.get(), Found, Fn);
}
Index: projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp (revision 340125)
@@ -1,10101 +1,10103 @@
//===------- SemaTemplate.cpp - Semantic Analysis for C++ Templates -------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis for C++ templates.
//===----------------------------------------------------------------------===//
#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include <iterator>
using namespace clang;
using namespace sema;
// Exported for use by Parser.
SourceRange
clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
unsigned N) {
if (!N) return SourceRange();
return SourceRange(Ps[0]->getTemplateLoc(), Ps[N-1]->getRAngleLoc());
}
namespace clang {
/// [temp.constr.decl]p2: A template's associated constraints are
/// defined as a single constraint-expression derived from the introduced
/// constraint-expressions [ ... ].
///
/// \param Params The template parameter list and optional requires-clause.
///
/// \param FD The underlying templated function declaration for a function
/// template.
static Expr *formAssociatedConstraints(TemplateParameterList *Params,
FunctionDecl *FD);
}
static Expr *clang::formAssociatedConstraints(TemplateParameterList *Params,
FunctionDecl *FD) {
// FIXME: Concepts: collect additional introduced constraint-expressions
assert(!FD && "Cannot collect constraints from function declaration yet.");
return Params->getRequiresClause();
}
/// Determine whether the declaration found is acceptable as the name
/// of a template and, if so, return that template declaration. Otherwise,
/// returns NULL.
static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
NamedDecl *Orig,
bool AllowFunctionTemplates) {
NamedDecl *D = Orig->getUnderlyingDecl();
if (isa<TemplateDecl>(D)) {
if (!AllowFunctionTemplates && isa<FunctionTemplateDecl>(D))
return nullptr;
return Orig;
}
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
// C++ [temp.local]p1:
// Like normal (non-template) classes, class templates have an
// injected-class-name (Clause 9). The injected-class-name
// can be used with or without a template-argument-list. When
// it is used without a template-argument-list, it is
// equivalent to the injected-class-name followed by the
// template-parameters of the class template enclosed in
// <>. When it is used with a template-argument-list, it
// refers to the specified class template specialization,
// which could be the current specialization or another
// specialization.
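// For example (a sketch; List is hypothetical):
//
//   template<typename T> struct List {
//     List *next; // injected-class-name, equivalent to List<T>
//     List<int> *other; // with arguments: a specific specialization
//   };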
if (Record->isInjectedClassName()) {
Record = cast<CXXRecordDecl>(Record->getDeclContext());
if (Record->getDescribedClassTemplate())
return Record->getDescribedClassTemplate();
if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(Record))
return Spec->getSpecializedTemplate();
}
return nullptr;
}
// 'using Dependent::foo;' can resolve to a template name.
// 'using typename Dependent::foo;' cannot (not even if 'foo' is an
// injected-class-name).
if (isa<UnresolvedUsingValueDecl>(D))
return D;
return nullptr;
}
void Sema::FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates) {
// The set of class templates we've already seen.
llvm::SmallPtrSet<ClassTemplateDecl *, 8> ClassTemplates;
LookupResult::Filter filter = R.makeFilter();
while (filter.hasNext()) {
NamedDecl *Orig = filter.next();
NamedDecl *Repl = isAcceptableTemplateName(Context, Orig,
AllowFunctionTemplates);
if (!Repl)
filter.erase();
else if (Repl != Orig) {
// C++ [temp.local]p3:
// A lookup that finds an injected-class-name (10.2) can result in an
// ambiguity in certain cases (for example, if it is found in more than
// one base class). If all of the injected-class-names that are found
// refer to specializations of the same class template, and if the name
// is used as a template-name, the reference refers to the class
// template itself and not a specialization thereof, and is not
// ambiguous.
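// For example (a sketch with hypothetical templates):
//
//   template<typename T> struct B { };
//   struct D : B<int>, B<char> { };
//
// Inside D, the injected-class-name B is found in both base classes, but
// when used as a template-name it refers to the class template B itself,
// so the reference is not ambiguous.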
if (ClassTemplateDecl *ClassTmpl = dyn_cast<ClassTemplateDecl>(Repl))
if (!ClassTemplates.insert(ClassTmpl).second) {
filter.erase();
continue;
}
// FIXME: we promote access to public here as a workaround to
// the fact that LookupResult doesn't let us remember that we
// found this template through a particular injected class name,
// which means we end up doing nasty things to the invariants.
// Pretending that access is public is *much* safer.
filter.replace(Repl, AS_public);
}
}
filter.done();
}
bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates) {
for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I)
if (isAcceptableTemplateName(Context, *I, AllowFunctionTemplates))
return true;
return false;
}
TemplateNameKind Sema::isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectTypePtr,
bool EnteringContext,
TemplateTy &TemplateResult,
bool &MemberOfUnknownSpecialization) {
assert(getLangOpts().CPlusPlus && "No template names in C!");
DeclarationName TName;
MemberOfUnknownSpecialization = false;
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_Identifier:
TName = DeclarationName(Name.Identifier);
break;
case UnqualifiedIdKind::IK_OperatorFunctionId:
TName = Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator);
break;
case UnqualifiedIdKind::IK_LiteralOperatorId:
TName = Context.DeclarationNames.getCXXLiteralOperatorName(Name.Identifier);
break;
default:
return TNK_Non_template;
}
QualType ObjectType = ObjectTypePtr.get();
LookupResult R(*this, TName, Name.getLocStart(), LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
MemberOfUnknownSpecialization))
return TNK_Non_template;
if (R.empty()) return TNK_Non_template;
if (R.isAmbiguous()) {
// Suppress diagnostics; we'll redo this lookup later.
R.suppressDiagnostics();
// FIXME: we might have ambiguous templates, in which case we
// should at least parse them properly!
return TNK_Non_template;
}
TemplateName Template;
TemplateNameKind TemplateKind;
unsigned ResultCount = R.end() - R.begin();
if (ResultCount > 1) {
// We assume that we'll preserve the qualifier from a function
// template name in other ways.
Template = Context.getOverloadedTemplateName(R.begin(), R.end());
TemplateKind = TNK_Function_template;
// We'll do this lookup again later.
R.suppressDiagnostics();
} else if (isa<UnresolvedUsingValueDecl>((*R.begin())->getUnderlyingDecl())) {
// We don't yet know whether this is a template-name or not.
MemberOfUnknownSpecialization = true;
return TNK_Non_template;
} else {
TemplateDecl *TD = cast<TemplateDecl>((*R.begin())->getUnderlyingDecl());
if (SS.isSet() && !SS.isInvalid()) {
NestedNameSpecifier *Qualifier = SS.getScopeRep();
Template = Context.getQualifiedTemplateName(Qualifier,
hasTemplateKeyword, TD);
} else {
Template = TemplateName(TD);
}
if (isa<FunctionTemplateDecl>(TD)) {
TemplateKind = TNK_Function_template;
// We'll do this lookup again later.
R.suppressDiagnostics();
} else {
assert(isa<ClassTemplateDecl>(TD) || isa<TemplateTemplateParmDecl>(TD) ||
isa<TypeAliasTemplateDecl>(TD) || isa<VarTemplateDecl>(TD) ||
isa<BuiltinTemplateDecl>(TD));
TemplateKind =
isa<VarTemplateDecl>(TD) ? TNK_Var_template : TNK_Type_template;
}
}
TemplateResult = TemplateTy::make(Template);
return TemplateKind;
}
bool Sema::isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template) {
CXXScopeSpec SS;
bool MemberOfUnknownSpecialization = false;
// We could use redeclaration lookup here, but we don't need to: the
// syntactic form of a deduction guide is enough to identify it even
// if we can't look up the template name at all.
LookupResult R(*this, DeclarationName(&Name), NameLoc, LookupOrdinaryName);
if (LookupTemplateName(R, S, SS, /*ObjectType*/ QualType(),
/*EnteringContext*/ false,
MemberOfUnknownSpecialization))
return false;
if (R.empty()) return false;
if (R.isAmbiguous()) {
// FIXME: Diagnose an ambiguity if we find at least one template.
R.suppressDiagnostics();
return false;
}
// We only treat template-names that name type templates as valid deduction
// guide names.
TemplateDecl *TD = R.getAsSingle<TemplateDecl>();
if (!TD || !getAsTypeTemplateDecl(TD))
return false;
if (Template)
*Template = TemplateTy::make(TemplateName(TD));
return true;
}
bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind) {
// We can't recover unless there's a dependent scope specifier preceding the
// template name.
// FIXME: Typo correction?
if (!SS || !SS->isSet() || !isDependentScopeSpecifier(*SS) ||
computeDeclContext(*SS))
return false;
// The code is missing a 'template' keyword prior to the dependent template
// name.
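// For example (a sketch): inside a template with a dependent type T,
// T::f<int>() must be written T::template f<int>(); without the keyword
// the '<' would otherwise be parsed as a less-than operator.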
NestedNameSpecifier *Qualifier = (NestedNameSpecifier*)SS->getScopeRep();
Diag(IILoc, diag::err_template_kw_missing)
<< Qualifier << II.getName()
<< FixItHint::CreateInsertion(IILoc, "template ");
SuggestedTemplate
= TemplateTy::make(Context.getDependentTemplateName(Qualifier, &II));
SuggestedKind = TNK_Dependent_template_name;
return true;
}
bool Sema::LookupTemplateName(LookupResult &Found,
Scope *S, CXXScopeSpec &SS,
QualType ObjectType,
bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc) {
// Determine where to perform name lookup
MemberOfUnknownSpecialization = false;
DeclContext *LookupCtx = nullptr;
bool IsDependent = false;
if (!ObjectType.isNull()) {
// This nested-name-specifier occurs in a member access expression, e.g.,
// x->B::f, and we are looking into the type of the object.
assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
LookupCtx = computeDeclContext(ObjectType);
IsDependent = !LookupCtx;
assert((IsDependent || !ObjectType->isIncompleteType() ||
ObjectType->castAs<TagType>()->isBeingDefined()) &&
"Caller should have completed object type");
// Template names cannot appear inside an Objective-C class or object type.
if (ObjectType->isObjCObjectOrInterfaceType()) {
Found.clear();
return false;
}
} else if (SS.isSet()) {
// This nested-name-specifier occurs after another nested-name-specifier,
// so look into the context associated with the prior nested-name-specifier.
LookupCtx = computeDeclContext(SS, EnteringContext);
IsDependent = !LookupCtx;
// The declaration context must be complete.
if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
return true;
}
bool ObjectTypeSearchedInScope = false;
bool AllowFunctionTemplatesInLookup = true;
if (LookupCtx) {
// Perform "qualified" name lookup into the declaration context we
// computed, which is either the type of the base of a member access
// expression or the declaration context associated with a prior
// nested-name-specifier.
LookupQualifiedName(Found, LookupCtx);
// FIXME: The C++ standard does not clearly specify what happens in the
// case where the object type is dependent, and implementations vary. In
// Clang, we treat a name after a . or -> as a template-name if lookup
// finds a non-dependent member or member of the current instantiation that
// is a type template, or finds no such members and lookup in the context
// of the postfix-expression finds a type template. In the latter case, the
// name is nonetheless dependent, and we may resolve it to a member of an
// unknown specialization when we come to instantiate the template.
IsDependent |= Found.wasNotFoundInCurrentInstantiation();
}
if (!SS.isSet() && (ObjectType.isNull() || Found.empty())) {
// C++ [basic.lookup.classref]p1:
// In a class member access expression (5.2.5), if the . or -> token is
// immediately followed by an identifier followed by a <, the
// identifier must be looked up to determine whether the < is the
// beginning of a template argument list (14.2) or a less-than operator.
// The identifier is first looked up in the class of the object
// expression. If the identifier is not found, it is then looked up in
// the context of the entire postfix-expression and shall name a class
// template.
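// For example (a sketch; convert is a hypothetical member template):
//
//   p->convert<int>(value)
//
// Here 'convert' is looked up in the class of *p first, to decide whether
// the '<' begins a template argument list.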
if (S)
LookupName(Found, S);
if (!ObjectType.isNull()) {
// FIXME: We should filter out all non-type templates here, particularly
// variable templates and concepts. But the exclusion of alias templates
// and template template parameters is a wording defect.
AllowFunctionTemplatesInLookup = false;
ObjectTypeSearchedInScope = true;
}
IsDependent |= Found.wasNotFoundInCurrentInstantiation();
}
if (Found.empty() && !IsDependent) {
// If we did not find any names, attempt to correct any typos.
DeclarationName Name = Found.getLookupName();
Found.clear();
// Simple filter callback that, for keywords, only accepts the C++ *_cast names.
auto FilterCCC = llvm::make_unique<CorrectionCandidateCallback>();
FilterCCC->WantTypeSpecifiers = false;
FilterCCC->WantExpressionKeywords = false;
FilterCCC->WantRemainingKeywords = false;
FilterCCC->WantCXXNamedCasts = true;
if (TypoCorrection Corrected = CorrectTypo(
Found.getLookupNameInfo(), Found.getLookupKind(), S, &SS,
std::move(FilterCCC), CTK_ErrorRecovery, LookupCtx)) {
Found.setLookupName(Corrected.getCorrection());
if (auto *ND = Corrected.getFoundDecl())
Found.addDecl(ND);
FilterAcceptableTemplateNames(Found);
if (!Found.empty()) {
if (LookupCtx) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
Name.getAsString() == CorrectedStr;
diagnoseTypo(Corrected, PDiag(diag::err_no_member_template_suggest)
<< Name << LookupCtx << DroppedSpecifier
<< SS.getRange());
} else {
diagnoseTypo(Corrected, PDiag(diag::err_no_template_suggest) << Name);
}
}
} else {
Found.setLookupName(Name);
}
}
NamedDecl *ExampleLookupResult =
Found.empty() ? nullptr : Found.getRepresentativeDecl();
FilterAcceptableTemplateNames(Found, AllowFunctionTemplatesInLookup);
if (Found.empty()) {
if (IsDependent) {
MemberOfUnknownSpecialization = true;
return false;
}
// If a 'template' keyword was used, a lookup that finds only non-template
// names is an error.
if (ExampleLookupResult && TemplateKWLoc.isValid()) {
Diag(Found.getNameLoc(), diag::err_template_kw_refers_to_non_template)
<< Found.getLookupName() << SS.getRange();
Diag(ExampleLookupResult->getUnderlyingDecl()->getLocation(),
diag::note_template_kw_refers_to_non_template)
<< Found.getLookupName();
return true;
}
return false;
}
if (S && !ObjectType.isNull() && !ObjectTypeSearchedInScope &&
!getLangOpts().CPlusPlus11) {
// C++03 [basic.lookup.classref]p1:
// [...] If the lookup in the class of the object expression finds a
// template, the name is also looked up in the context of the entire
// postfix-expression and [...]
//
// Note: C++11 does not perform this second lookup.
LookupResult FoundOuter(*this, Found.getLookupName(), Found.getNameLoc(),
LookupOrdinaryName);
LookupName(FoundOuter, S);
FilterAcceptableTemplateNames(FoundOuter, /*AllowFunctionTemplates=*/false);
if (FoundOuter.empty()) {
// - if the name is not found, the name found in the class of the
// object expression is used, otherwise
} else if (!FoundOuter.getAsSingle<ClassTemplateDecl>() ||
FoundOuter.isAmbiguous()) {
// - if the name is found in the context of the entire
// postfix-expression and does not name a class template, the name
// found in the class of the object expression is used, otherwise
FoundOuter.clear();
} else if (!Found.isSuppressingDiagnostics()) {
// - if the name found is a class template, it must refer to the same
// entity as the one found in the class of the object expression,
// otherwise the program is ill-formed.
if (!Found.isSingleResult() ||
Found.getFoundDecl()->getCanonicalDecl()
!= FoundOuter.getFoundDecl()->getCanonicalDecl()) {
Diag(Found.getNameLoc(),
diag::ext_nested_name_member_ref_lookup_ambiguous)
<< Found.getLookupName()
<< ObjectType;
Diag(Found.getRepresentativeDecl()->getLocation(),
diag::note_ambig_member_ref_object_type)
<< ObjectType;
Diag(FoundOuter.getFoundDecl()->getLocation(),
diag::note_ambig_member_ref_scope);
// Recover by taking the template that we found in the object
// expression's type.
}
}
}
return false;
}
void Sema::diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater) {
if (TemplateName.isInvalid())
return;
DeclarationNameInfo NameInfo;
CXXScopeSpec SS;
LookupNameKind LookupKind;
DeclContext *LookupCtx = nullptr;
NamedDecl *Found = nullptr;
bool MissingTemplateKeyword = false;
// Figure out what name we looked up.
if (auto *DRE = dyn_cast<DeclRefExpr>(TemplateName.get())) {
NameInfo = DRE->getNameInfo();
SS.Adopt(DRE->getQualifierLoc());
LookupKind = LookupOrdinaryName;
Found = DRE->getFoundDecl();
} else if (auto *ME = dyn_cast<MemberExpr>(TemplateName.get())) {
NameInfo = ME->getMemberNameInfo();
SS.Adopt(ME->getQualifierLoc());
LookupKind = LookupMemberName;
LookupCtx = ME->getBase()->getType()->getAsCXXRecordDecl();
Found = ME->getMemberDecl();
} else if (auto *DSDRE =
dyn_cast<DependentScopeDeclRefExpr>(TemplateName.get())) {
NameInfo = DSDRE->getNameInfo();
SS.Adopt(DSDRE->getQualifierLoc());
MissingTemplateKeyword = true;
} else if (auto *DSME =
dyn_cast<CXXDependentScopeMemberExpr>(TemplateName.get())) {
NameInfo = DSME->getMemberNameInfo();
SS.Adopt(DSME->getQualifierLoc());
MissingTemplateKeyword = true;
} else {
llvm_unreachable("unexpected kind of potential template name");
}
// If this is a dependent-scope lookup, diagnose that the 'template' keyword
// was missing.
if (MissingTemplateKeyword) {
Diag(NameInfo.getLocStart(), diag::err_template_kw_missing)
<< "" << NameInfo.getName().getAsString()
<< SourceRange(Less, Greater);
return;
}
// Try to correct the name by looking for templates and C++ named casts.
struct TemplateCandidateFilter : CorrectionCandidateCallback {
TemplateCandidateFilter() {
WantTypeSpecifiers = false;
WantExpressionKeywords = false;
WantRemainingKeywords = false;
WantCXXNamedCasts = true;
}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
if (auto *ND = Candidate.getCorrectionDecl())
return isAcceptableTemplateName(ND->getASTContext(), ND, true);
return Candidate.isKeyword();
}
};
DeclarationName Name = NameInfo.getName();
if (TypoCorrection Corrected =
CorrectTypo(NameInfo, LookupKind, S, &SS,
llvm::make_unique<TemplateCandidateFilter>(),
CTK_ErrorRecovery, LookupCtx)) {
auto *ND = Corrected.getFoundDecl();
if (ND)
ND = isAcceptableTemplateName(Context, ND,
/*AllowFunctionTemplates*/ true);
if (ND || Corrected.isKeyword()) {
if (LookupCtx) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier = Corrected.WillReplaceSpecifier() &&
Name.getAsString() == CorrectedStr;
diagnoseTypo(Corrected,
PDiag(diag::err_non_template_in_member_template_id_suggest)
<< Name << LookupCtx << DroppedSpecifier
<< SS.getRange(), false);
} else {
diagnoseTypo(Corrected,
PDiag(diag::err_non_template_in_template_id_suggest)
<< Name, false);
}
if (Found)
Diag(Found->getLocation(),
diag::note_non_template_in_template_id_found);
return;
}
}
Diag(NameInfo.getLoc(), diag::err_non_template_in_template_id)
<< Name << SourceRange(Less, Greater);
if (Found)
Diag(Found->getLocation(), diag::note_non_template_in_template_id_found);
}
/// ActOnDependentIdExpression - Handle a dependent id-expression that
/// was just parsed. This is only possible with an explicit scope
/// specifier naming a dependent type.
ExprResult
Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs) {
DeclContext *DC = getFunctionLevelDeclContext();
// C++11 [expr.prim.general]p12:
// An id-expression that denotes a non-static data member or non-static
// member function of a class can only be used:
// (...)
// - if that id-expression denotes a non-static data member and it
// appears in an unevaluated operand.
//
// If this might be the case, form a DependentScopeDeclRefExpr instead of a
// CXXDependentScopeMemberExpr. The former can instantiate to either
// DeclRefExpr or MemberExpr depending on lookup results, while the latter is
// always a MemberExpr.
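// For example (a sketch with a dependent base T):
//
//   template<typename T> struct D : T {
//     void f() { (void)sizeof(T::m); }
//   };
//
// T::m appears in an unevaluated operand and may instantiate to a
// non-static data member, so a DependentScopeDeclRefExpr is formed here.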
bool MightBeCxx11UnevalField =
getLangOpts().CPlusPlus11 && isUnevaluatedContext();
// Check if the nested name specifier is an enum type.
bool IsEnum = false;
if (NestedNameSpecifier *NNS = SS.getScopeRep())
IsEnum = dyn_cast_or_null<EnumType>(NNS->getAsType());
if (!MightBeCxx11UnevalField && !isAddressOfOperand && !IsEnum &&
isa<CXXMethodDecl>(DC) && cast<CXXMethodDecl>(DC)->isInstance()) {
QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context);
// Since the 'this' expression is synthesized, we don't need to
// perform the double-lookup check.
NamedDecl *FirstQualifierInScope = nullptr;
return CXXDependentScopeMemberExpr::Create(
Context, /*This*/ nullptr, ThisType, /*IsArrow*/ true,
/*Op*/ SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc,
FirstQualifierInScope, NameInfo, TemplateArgs);
}
return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
}
ExprResult
Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
return DependentScopeDeclRefExpr::Create(
Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
TemplateArgs);
}
/// Determine whether we would be unable to instantiate this template (because
/// it either has no definition, or is in the process of being instantiated).
bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain /*= true*/) {
assert(isa<TagDecl>(Instantiation) || isa<FunctionDecl>(Instantiation) ||
isa<VarDecl>(Instantiation));
bool IsEntityBeingDefined = false;
if (const TagDecl *TD = dyn_cast_or_null<TagDecl>(PatternDef))
IsEntityBeingDefined = TD->isBeingDefined();
if (PatternDef && !IsEntityBeingDefined) {
NamedDecl *SuggestedDef = nullptr;
if (!hasVisibleDefinition(const_cast<NamedDecl*>(PatternDef), &SuggestedDef,
/*OnlyNeedComplete*/false)) {
// If we're allowed to diagnose this and recover, do so.
bool Recover = Complain && !isSFINAEContext();
if (Complain)
diagnoseMissingImport(PointOfInstantiation, SuggestedDef,
Sema::MissingImportKind::Definition, Recover);
return !Recover;
}
return false;
}
if (!Complain || (PatternDef && PatternDef->isInvalidDecl()))
return true;
llvm::Optional<unsigned> Note;
QualType InstantiationTy;
if (TagDecl *TD = dyn_cast<TagDecl>(Instantiation))
InstantiationTy = Context.getTypeDeclType(TD);
if (PatternDef) {
Diag(PointOfInstantiation,
diag::err_template_instantiate_within_definition)
<< /*implicit|explicit*/(TSK != TSK_ImplicitInstantiation)
<< InstantiationTy;
// Not much point in noting the template declaration here, since
// we're lexically inside it.
Instantiation->setInvalidDecl();
} else if (InstantiatedFromMember) {
if (isa<FunctionDecl>(Instantiation)) {
Diag(PointOfInstantiation,
diag::err_explicit_instantiation_undefined_member)
<< /*member function*/ 1 << Instantiation->getDeclName()
<< Instantiation->getDeclContext();
Note = diag::note_explicit_instantiation_here;
} else {
assert(isa<TagDecl>(Instantiation) && "Must be a TagDecl!");
Diag(PointOfInstantiation,
diag::err_implicit_instantiate_member_undefined)
<< InstantiationTy;
Note = diag::note_member_declared_at;
}
} else {
if (isa<FunctionDecl>(Instantiation)) {
Diag(PointOfInstantiation,
diag::err_explicit_instantiation_undefined_func_template)
<< Pattern;
Note = diag::note_explicit_instantiation_here;
} else if (isa<TagDecl>(Instantiation)) {
Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
<< (TSK != TSK_ImplicitInstantiation)
<< InstantiationTy;
Note = diag::note_template_decl_here;
} else {
assert(isa<VarDecl>(Instantiation) && "Must be a VarDecl!");
if (isa<VarTemplateSpecializationDecl>(Instantiation)) {
Diag(PointOfInstantiation,
diag::err_explicit_instantiation_undefined_var_template)
<< Instantiation;
Instantiation->setInvalidDecl();
} else
Diag(PointOfInstantiation,
diag::err_explicit_instantiation_undefined_member)
<< /*static data member*/ 2 << Instantiation->getDeclName()
<< Instantiation->getDeclContext();
Note = diag::note_explicit_instantiation_here;
}
}
if (Note) // Diagnostics were emitted.
Diag(Pattern->getLocation(), Note.getValue());
// In general, Instantiation isn't marked invalid to get more than one
// error for multiple undefined instantiations. But the code that does
// explicit declaration -> explicit definition conversion can't handle
// invalid declarations, so mark as invalid in that case.
if (TSK == TSK_ExplicitInstantiationDeclaration)
Instantiation->setInvalidDecl();
return true;
}
/// DiagnoseTemplateParameterShadow - Produce a diagnostic complaining
/// that the template parameter 'PrevDecl' is being shadowed by a new
/// declaration at location Loc.
void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
assert(PrevDecl->isTemplateParameter() && "Not a template parameter");
// Microsoft Visual C++ permits template parameters to be shadowed.
if (getLangOpts().MicrosoftExt)
return;
// C++ [temp.local]p4:
// A template-parameter shall not be redeclared within its
// scope (including nested scopes).
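// For example (a sketch):
//
//   template<typename T> void f() {
//     int T; // error: redeclares the template parameter T
//   }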
Diag(Loc, diag::err_template_param_shadow)
<< cast<NamedDecl>(PrevDecl)->getDeclName();
Diag(PrevDecl->getLocation(), diag::note_template_param_here);
}
/// AdjustDeclIfTemplate - If the given decl happens to be a template, reset
/// the parameter D to reference the templated declaration and return a pointer
/// to the template declaration. Otherwise, do nothing to D and return null.
TemplateDecl *Sema::AdjustDeclIfTemplate(Decl *&D) {
if (TemplateDecl *Temp = dyn_cast_or_null<TemplateDecl>(D)) {
D = Temp->getTemplatedDecl();
return Temp;
}
return nullptr;
}
ParsedTemplateArgument ParsedTemplateArgument::getTemplatePackExpansion(
SourceLocation EllipsisLoc) const {
assert(Kind == Template &&
"Only template template arguments can be pack expansions here");
assert(getAsTemplate().get().containsUnexpandedParameterPack() &&
"Template template argument pack expansion without packs");
ParsedTemplateArgument Result(*this);
Result.EllipsisLoc = EllipsisLoc;
return Result;
}
static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
const ParsedTemplateArgument &Arg) {
switch (Arg.getKind()) {
case ParsedTemplateArgument::Type: {
TypeSourceInfo *DI;
QualType T = SemaRef.GetTypeFromParser(Arg.getAsType(), &DI);
if (!DI)
DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getLocation());
return TemplateArgumentLoc(TemplateArgument(T), DI);
}
case ParsedTemplateArgument::NonType: {
Expr *E = static_cast<Expr *>(Arg.getAsExpr());
return TemplateArgumentLoc(TemplateArgument(E), E);
}
case ParsedTemplateArgument::Template: {
TemplateName Template = Arg.getAsTemplate().get();
TemplateArgument TArg;
if (Arg.getEllipsisLoc().isValid())
TArg = TemplateArgument(Template, Optional<unsigned int>());
else
TArg = Template;
return TemplateArgumentLoc(TArg,
Arg.getScopeSpec().getWithLocInContext(
SemaRef.Context),
Arg.getLocation(),
Arg.getEllipsisLoc());
}
}
llvm_unreachable("Unhandled parsed template argument");
}
/// Translates template arguments as provided by the parser
/// into template arguments used by semantic analysis.
void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
TemplateArgumentListInfo &TemplateArgs) {
for (unsigned I = 0, Last = TemplateArgsIn.size(); I != Last; ++I)
TemplateArgs.addArgument(translateTemplateArgument(*this,
TemplateArgsIn[I]));
}
static void maybeDiagnoseTemplateParameterShadow(Sema &SemaRef, Scope *S,
SourceLocation Loc,
IdentifierInfo *Name) {
NamedDecl *PrevDecl = SemaRef.LookupSingleName(
S, Name, Loc, Sema::LookupOrdinaryName, Sema::ForVisibleRedeclaration);
if (PrevDecl && PrevDecl->isTemplateParameter())
SemaRef.DiagnoseTemplateParameterShadow(Loc, PrevDecl);
}
/// Convert a parsed type into a parsed template argument. This is mostly
/// trivial, except that we may have parsed a C++17 deduced class template
/// specialization type, in which case we should form a template template
/// argument instead of a type template argument.
ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
TypeSourceInfo *TInfo;
QualType T = GetTypeFromParser(ParsedType.get(), &TInfo);
if (T.isNull())
return ParsedTemplateArgument();
assert(TInfo && "template argument with no location");
// If we might have formed a deduced template specialization type, convert
// it to a template template argument.
if (getLangOpts().CPlusPlus17) {
TypeLoc TL = TInfo->getTypeLoc();
SourceLocation EllipsisLoc;
if (auto PET = TL.getAs<PackExpansionTypeLoc>()) {
EllipsisLoc = PET.getEllipsisLoc();
TL = PET.getPatternLoc();
}
CXXScopeSpec SS;
if (auto ET = TL.getAs<ElaboratedTypeLoc>()) {
SS.Adopt(ET.getQualifierLoc());
TL = ET.getNamedTypeLoc();
}
if (auto DTST = TL.getAs<DeducedTemplateSpecializationTypeLoc>()) {
TemplateName Name = DTST.getTypePtr()->getTemplateName();
if (SS.isSet())
Name = Context.getQualifiedTemplateName(SS.getScopeRep(),
/*HasTemplateKeyword*/ false,
Name.getAsTemplateDecl());
ParsedTemplateArgument Result(SS, TemplateTy::make(Name),
DTST.getTemplateNameLoc());
if (EllipsisLoc.isValid())
Result = Result.getTemplatePackExpansion(EllipsisLoc);
return Result;
}
}
// This is a normal type template argument. Note, if the type template
// argument is an injected-class-name for a template, it has a dual nature
// and can be used as either a type or a template. We handle that in
// convertTypeTemplateArgumentToTemplate.
return ParsedTemplateArgument(ParsedTemplateArgument::Type,
ParsedType.get().getAsOpaquePtr(),
TInfo->getTypeLoc().getLocStart());
}
/// ActOnTypeParameter - Called when a C++ template type parameter
/// (e.g., "typename T") has been parsed. Typename specifies whether
/// the keyword "typename" was used to declare the type parameter
/// (otherwise, "class" was used), and KeyLoc is the location of the
/// "class" or "typename" keyword. ParamName is the name of the
/// parameter (NULL indicates an unnamed template parameter) and
/// ParamNameLoc is the location of the parameter name (if any).
/// If the type parameter has a default argument, it is checked and
/// attached to the parameter here.
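///
/// For example (a sketch): for the parameter "class U = int" in a template
/// parameter list, Typename is false, ParamName is U, and DefaultArg is the
/// parsed type int.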
NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg) {
assert(S->isTemplateParamScope() &&
"Template type parameter not in template parameter scope!");
SourceLocation Loc = ParamNameLoc;
if (!ParamName)
Loc = KeyLoc;
bool IsParameterPack = EllipsisLoc.isValid();
TemplateTypeParmDecl *Param
= TemplateTypeParmDecl::Create(Context, Context.getTranslationUnitDecl(),
KeyLoc, Loc, Depth, Position, ParamName,
Typename, IsParameterPack);
Param->setAccess(AS_public);
if (ParamName) {
maybeDiagnoseTemplateParameterShadow(*this, S, ParamNameLoc, ParamName);
// Add the template parameter into the current scope.
S->AddDecl(Param);
IdResolver.AddDecl(Param);
}
// C++0x [temp.param]p9:
// A default template-argument may be specified for any kind of
// template-parameter that is not a template parameter pack.
if (DefaultArg && IsParameterPack) {
Diag(EqualLoc, diag::err_template_param_pack_default_arg);
DefaultArg = nullptr;
}
// Handle the default argument, if provided.
if (DefaultArg) {
TypeSourceInfo *DefaultTInfo;
GetTypeFromParser(DefaultArg, &DefaultTInfo);
assert(DefaultTInfo && "expected source information for type");
// Check for unexpanded parameter packs.
if (DiagnoseUnexpandedParameterPack(Loc, DefaultTInfo,
UPPC_DefaultArgument))
return Param;
// Check the template argument itself.
if (CheckTemplateArgument(Param, DefaultTInfo)) {
Param->setInvalidDecl();
return Param;
}
Param->setDefaultArgument(DefaultTInfo);
}
return Param;
}
/// Check that the type of a non-type template parameter is
/// well-formed.
///
/// \returns the (possibly-promoted) parameter type if valid;
/// otherwise, produces a diagnostic and returns a NULL type.
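///
/// A sketch of the rule applied below: integral and enumeration types,
/// pointers, references, pointers to members, and std::nullptr_t are
/// accepted (e.g. template<int N>); array and function types are adjusted
/// to pointers; anything else (e.g. template<float F>) is diagnosed.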
QualType Sema::CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc) {
if (TSI->getType()->isUndeducedType()) {
// C++1z [temp.dep.expr]p3:
// An id-expression is type-dependent if it contains
// - an identifier associated by name lookup with a non-type
// template-parameter declared with a type that contains a
// placeholder type (7.1.7.4),
TSI = SubstAutoTypeSourceInfo(TSI, Context.DependentTy);
}
return CheckNonTypeTemplateParameterType(TSI->getType(), Loc);
}
QualType Sema::CheckNonTypeTemplateParameterType(QualType T,
SourceLocation Loc) {
// We don't allow variably-modified types as the type of non-type template
// parameters.
if (T->isVariablyModifiedType()) {
Diag(Loc, diag::err_variably_modified_nontype_template_param)
<< T;
return QualType();
}
// C++ [temp.param]p4:
//
// A non-type template-parameter shall have one of the following
// (optionally cv-qualified) types:
//
// -- integral or enumeration type,
if (T->isIntegralOrEnumerationType() ||
// -- pointer to object or pointer to function,
T->isPointerType() ||
// -- reference to object or reference to function,
T->isReferenceType() ||
// -- pointer to member,
T->isMemberPointerType() ||
// -- std::nullptr_t.
T->isNullPtrType() ||
// If T is a dependent type, we can't do the check now, so we
// assume that it is well-formed.
T->isDependentType() ||
// Allow use of auto in template parameter declarations.
T->isUndeducedType()) {
// C++ [temp.param]p5: The top-level cv-qualifiers on the template-parameter
// are ignored when determining its type.
return T.getUnqualifiedType();
}
// C++ [temp.param]p8:
//
// A non-type template-parameter of type "array of T" or
// "function returning T" is adjusted to be of type "pointer to
// T" or "pointer to function returning T", respectively.
else if (T->isArrayType() || T->isFunctionType())
return Context.getDecayedType(T);
Diag(Loc, diag::err_template_nontype_parm_bad_type)
<< T;
return QualType();
}
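// For illustration (examples not in the original source; C++17-era rules):
//
//   template<int N> struct A;       // OK: integral type
//   template<int *P> struct B;      // OK: pointer to object
//   template<int Arr[5]> struct C;  // OK: adjusted to 'int *'
//   template<float F> struct D;     // error: invalid type for a non-type
//                                   // template parameter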
NamedDecl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *Default) {
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
// Check that we have valid decl-specifiers specified.
auto CheckValidDeclSpecifiers = [this, &D] {
// C++ [temp.param]
// p1
// template-parameter:
// ...
// parameter-declaration
// p2
// ... A storage class shall not be specified in a template-parameter
// declaration.
// [dcl.typedef]p1:
// The typedef specifier [...] shall not be used in the decl-specifier-seq
// of a parameter-declaration
const DeclSpec &DS = D.getDeclSpec();
auto EmitDiag = [this](SourceLocation Loc) {
Diag(Loc, diag::err_invalid_decl_specifier_in_nontype_parm)
<< FixItHint::CreateRemoval(Loc);
};
if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified)
EmitDiag(DS.getStorageClassSpecLoc());
if (DS.getThreadStorageClassSpec() != TSCS_unspecified)
EmitDiag(DS.getThreadStorageClassSpecLoc());
// [dcl.inline]p1:
// The inline specifier can be applied only to the declaration or
// definition of a variable or function.
if (DS.isInlineSpecified())
EmitDiag(DS.getInlineSpecLoc());
// [dcl.constexpr]p1:
// The constexpr specifier shall be applied only to the definition of a
// variable or variable template or the declaration of a function or
// function template.
if (DS.isConstexprSpecified())
EmitDiag(DS.getConstexprSpecLoc());
// [dcl.fct.spec]p1:
// Function-specifiers can be used only in function declarations.
if (DS.isVirtualSpecified())
EmitDiag(DS.getVirtualSpecLoc());
if (DS.isExplicitSpecified())
EmitDiag(DS.getExplicitSpecLoc());
if (DS.isNoreturnSpecified())
EmitDiag(DS.getNoreturnSpecLoc());
};
CheckValidDeclSpecifiers();
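// For illustration (examples not in the original source), each of these is
// diagnosed by CheckValidDeclSpecifiers above, with a fix-it removing the
// offending specifier:
//
//   template<static int N> struct A;    // error: storage class specified
//   template<constexpr int N> struct B; // error: constexpr specified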
if (TInfo->getType()->isUndeducedType()) {
Diag(D.getIdentifierLoc(),
diag::warn_cxx14_compat_template_nontype_parm_auto_type)
<< QualType(TInfo->getType()->getContainedAutoType(), 0);
}
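// For illustration (example not in the original source):
//
//   template<auto V> struct X; // OK in C++17; V's type is deduced from
//                              // the argument, e.g. 'int' for X<42>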
assert(S->isTemplateParamScope() &&
"Non-type template parameter not in template parameter scope!");
bool Invalid = false;
QualType T = CheckNonTypeTemplateParameterType(TInfo, D.getIdentifierLoc());
if (T.isNull()) {
T = Context.IntTy; // Recover with an 'int' type.
Invalid = true;
}
IdentifierInfo *ParamName = D.getIdentifier();
bool IsParameterPack = D.hasEllipsis();
NonTypeTemplateParmDecl *Param
= NonTypeTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
D.getLocStart(),
D.getIdentifierLoc(),
Depth, Position, ParamName, T,
IsParameterPack, TInfo);
Param->setAccess(AS_public);
if (Invalid)
Param->setInvalidDecl();
if (ParamName) {
maybeDiagnoseTemplateParameterShadow(*this, S, D.getIdentifierLoc(),
ParamName);
// Add the template parameter into the current scope.
S->AddDecl(Param);
IdResolver.AddDecl(Param);
}
// C++0x [temp.param]p9:
// A default template-argument may be specified for any kind of
// template-parameter that is not a template parameter pack.
if (Default && IsParameterPack) {
Diag(EqualLoc, diag::err_template_param_pack_default_arg);
Default = nullptr;
}
// Check the well-formedness of the default template argument, if provided.
if (Default) {
// Check for unexpanded parameter packs.
if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
return Param;
TemplateArgument Converted;
ExprResult DefaultRes =
CheckTemplateArgument(Param, Param->getType(), Default, Converted);
if (DefaultRes.isInvalid()) {
Param->setInvalidDecl();
return Param;
}
Default = DefaultRes.get();
Param->setDefaultArgument(Default);
}
return Param;
}
/// ActOnTemplateTemplateParameter - Called when a C++ template template
/// parameter (e.g. T in template <template \<typename> class T> class array)
/// has been parsed. S is the current scope.
NamedDecl *Sema::ActOnTemplateTemplateParameter(Scope* S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument Default) {
assert(S->isTemplateParamScope() &&
"Template template parameter not in template parameter scope!");
// Construct the parameter object.
bool IsParameterPack = EllipsisLoc.isValid();
TemplateTemplateParmDecl *Param =
TemplateTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
NameLoc.isInvalid()? TmpLoc : NameLoc,
Depth, Position, IsParameterPack,
Name, Params);
Param->setAccess(AS_public);
// If the template template parameter has a name, then link the identifier
// into the scope and lookup mechanisms.
if (Name) {
maybeDiagnoseTemplateParameterShadow(*this, S, NameLoc, Name);
S->AddDecl(Param);
IdResolver.AddDecl(Param);
}
if (Params->size() == 0) {
Diag(Param->getLocation(), diag::err_template_template_parm_no_parms)
<< SourceRange(Params->getLAngleLoc(), Params->getRAngleLoc());
Param->setInvalidDecl();
}
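// For illustration (example not in the original source):
//
//   template<template<> class T> struct X; // error: inner list is empty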
// C++0x [temp.param]p9:
// A default template-argument may be specified for any kind of
// template-parameter that is not a template parameter pack.
if (IsParameterPack && !Default.isInvalid()) {
Diag(EqualLoc, diag::err_template_param_pack_default_arg);
Default = ParsedTemplateArgument();
}
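// For illustration (example not in the original source; V stands in for any
// class template):
//
//   template<typename> struct V;
//   template<template<typename> class ...TTs = V> struct X;
//   // error: template parameter pack cannot have a default argument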
if (!Default.isInvalid()) {
// Check only that we have a template template argument. We don't want to
// try to check well-formedness now, because our template template parameter
// might have dependent types in its template parameters, which we wouldn't
// be able to match now.
//
// If none of the template template parameter's template arguments mention
// other template parameters, we could actually perform more checking here.
// However, it isn't worth doing.
TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default);
if (DefaultArg.getArgument().getAsTemplate().isNull()) {
Diag(DefaultArg.getLocation(), diag::err_template_arg_not_valid_template)
<< DefaultArg.getSourceRange();
return Param;
}
// Check for unexpanded parameter packs.
if (DiagnoseUnexpandedParameterPack(DefaultArg.getLocation(),
DefaultArg.getArgument().getAsTemplate(),
UPPC_DefaultArgument))
return Param;
Param->setDefaultArgument(Context, DefaultArg);
}
return Param;
}
/// ActOnTemplateParameterList - Builds a TemplateParameterList, optionally
/// constrained by RequiresClause, that contains the template parameters in
/// Params.
TemplateParameterList *
Sema::ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause) {
if (ExportLoc.isValid())
Diag(ExportLoc, diag::warn_template_export_unsupported);
return TemplateParameterList::Create(
Context, TemplateLoc, LAngleLoc,
llvm::makeArrayRef(Params.data(), Params.size()),
RAngleLoc, RequiresClause);
}
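// For illustration (example not in the original source):
//
//   export template<typename T> void f(T); // warning: exported templates
//                                          // are unsupported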
static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) {
if (SS.isSet())
T->setQualifierInfo(SS.getWithLocInContext(T->getASTContext()));
}
DeclResult Sema::CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody) {
assert(TemplateParams && TemplateParams->size() > 0 &&
"No template parameters");
assert(TUK != TUK_Reference && "Can only declare or define class templates");
bool Invalid = false;
// Check that we can declare a template here.
if (CheckTemplateDeclScope(S, TemplateParams))
return true;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
assert(Kind != TTK_Enum && "can't build template of enumerated type");
// There is no such thing as an unnamed class template.
if (!Name) {
Diag(KWLoc, diag::err_template_unnamed_class);
return true;
}
// Find any previous declaration with this name. For a friend with no
// scope explicitly specified, we only look for tag declarations (per
// C++11 [basic.lookup.elab]p2).
DeclContext *SemanticContext;
LookupResult Previous(*this, Name, NameLoc,
(SS.isEmpty() && TUK == TUK_Friend)
? LookupTagName : LookupOrdinaryName,
forRedeclarationInCurContext());
if (SS.isNotEmpty() && !SS.isInvalid()) {
SemanticContext = computeDeclContext(SS, true);
if (!SemanticContext) {
// FIXME: Horrible, horrible hack! We can't currently represent this
// in the AST, and historically we have just ignored such friend
// class templates, so don't complain here.
Diag(NameLoc, TUK == TUK_Friend
? diag::warn_template_qualified_friend_ignored
: diag::err_template_qualified_declarator_no_match)
<< SS.getScopeRep() << SS.getRange();
return TUK != TUK_Friend;
}
if (RequireCompleteDeclContext(SS, SemanticContext))
return true;
// If we're adding a template to a dependent context, we may need to
// rebuild some of the types used within the template parameter list,
// now that we know what the current instantiation is.
if (SemanticContext->isDependentContext()) {
ContextRAII SavedContext(*this, SemanticContext);
if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
Invalid = true;
} else if (TUK != TUK_Friend && TUK != TUK_Reference)
diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc, false);
LookupQualifiedName(Previous, SemanticContext);
} else {
SemanticContext = CurContext;
// C++14 [class.mem]p14:
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member template of class T
if (TUK != TUK_Friend &&
DiagnoseClassNameShadow(SemanticContext,
DeclarationNameInfo(Name, NameLoc)))
return true;
LookupName(Previous, S);
}
if (Previous.isAmbiguous())
return true;
NamedDecl *PrevDecl = nullptr;
if (Previous.begin() != Previous.end())
PrevDecl = (*Previous.begin())->getUnderlyingDecl();
if (PrevDecl && PrevDecl->isTemplateParameter()) {
// Maybe we will complain about the shadowed template parameter.
DiagnoseTemplateParameterShadow(NameLoc, PrevDecl);
// Just pretend that we didn't see the previous declaration.
PrevDecl = nullptr;
}
// If there is a previous declaration with the same name, check
// whether this is a valid redeclaration.
ClassTemplateDecl *PrevClassTemplate =
dyn_cast_or_null<ClassTemplateDecl>(PrevDecl);
// We may have found the injected-class-name of a class template,
// class template partial specialization, or class template specialization.
// In these cases, grab the template that is being defined or specialized.
if (!PrevClassTemplate && PrevDecl && isa<CXXRecordDecl>(PrevDecl) &&
cast<CXXRecordDecl>(PrevDecl)->isInjectedClassName()) {
PrevDecl = cast<CXXRecordDecl>(PrevDecl->getDeclContext());
PrevClassTemplate
= cast<CXXRecordDecl>(PrevDecl)->getDescribedClassTemplate();
if (!PrevClassTemplate && isa<ClassTemplateSpecializationDecl>(PrevDecl)) {
PrevClassTemplate
= cast<ClassTemplateSpecializationDecl>(PrevDecl)
->getSpecializedTemplate();
}
}
if (TUK == TUK_Friend) {
// C++ [namespace.memdef]p3:
// [...] When looking for a prior declaration of a class or a function
// declared as a friend, and when the name of the friend class or
// function is neither a qualified name nor a template-id, scopes outside
// the innermost enclosing namespace scope are not considered.
if (!SS.isSet()) {
DeclContext *OutermostContext = CurContext;
while (!OutermostContext->isFileContext())
OutermostContext = OutermostContext->getLookupParent();
if (PrevDecl &&
(OutermostContext->Equals(PrevDecl->getDeclContext()) ||
OutermostContext->Encloses(PrevDecl->getDeclContext()))) {
SemanticContext = PrevDecl->getDeclContext();
} else {
// Declarations in outer scopes don't matter. However, the outermost
// context we computed is the semantic context for our new
// declaration.
PrevDecl = PrevClassTemplate = nullptr;
SemanticContext = OutermostContext;
// Check that the chosen semantic context doesn't already contain a
// declaration of this name as a non-tag type.
Previous.clear(LookupOrdinaryName);
DeclContext *LookupContext = SemanticContext;
while (LookupContext->isTransparentContext())
LookupContext = LookupContext->getLookupParent();
LookupQualifiedName(Previous, LookupContext);
if (Previous.isAmbiguous())
return true;
if (Previous.begin() != Previous.end())
PrevDecl = (*Previous.begin())->getUnderlyingDecl();
}
}
} else if (PrevDecl &&
!isDeclInScope(Previous.getRepresentativeDecl(), SemanticContext,
S, SS.isValid()))
PrevDecl = PrevClassTemplate = nullptr;
if (auto *Shadow = dyn_cast_or_null<UsingShadowDecl>(
PrevDecl ? Previous.getRepresentativeDecl() : nullptr)) {
if (SS.isEmpty() &&
!(PrevClassTemplate &&
PrevClassTemplate->getDeclContext()->getRedeclContext()->Equals(
SemanticContext->getRedeclContext()))) {
Diag(KWLoc, diag::err_using_decl_conflict_reverse);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
// Recover by ignoring the old declaration.
PrevDecl = PrevClassTemplate = nullptr;
}
}
// TODO Memory management; associated constraints are not always stored.
Expr *const CurAC = formAssociatedConstraints(TemplateParams, nullptr);
if (PrevClassTemplate) {
// Ensure that the template parameter lists are compatible. Skip this check
// for a friend in a dependent context: the template parameter list itself
// could be dependent.
if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
!TemplateParameterListsAreEqual(TemplateParams,
PrevClassTemplate->getTemplateParameters(),
/*Complain=*/true,
TPL_TemplateMatch))
return true;
// Check for matching associated constraints on redeclarations.
const Expr *const PrevAC = PrevClassTemplate->getAssociatedConstraints();
const bool RedeclACMismatch = [&] {
if (!(CurAC || PrevAC))
return false; // Nothing to check; no mismatch.
if (CurAC && PrevAC) {
llvm::FoldingSetNodeID CurACInfo, PrevACInfo;
CurAC->Profile(CurACInfo, Context, /*Canonical=*/true);
PrevAC->Profile(PrevACInfo, Context, /*Canonical=*/true);
if (CurACInfo == PrevACInfo)
return false; // All good; no mismatch.
}
return true;
}();
if (RedeclACMismatch) {
Diag(CurAC ? CurAC->getLocStart() : NameLoc,
diag::err_template_different_associated_constraints);
Diag(PrevAC ? PrevAC->getLocStart() : PrevClassTemplate->getLocation(),
diag::note_template_prev_declaration) << /*declaration*/0;
return true;
}
// C++ [temp.class]p4:
// In a redeclaration, partial specialization, explicit
// specialization or explicit instantiation of a class template,
// the class-key shall agree in kind with the original class
// template declaration (7.1.5.3).
RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl();
if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind,
TUK == TUK_Definition, KWLoc, Name)) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< Name
<< FixItHint::CreateReplacement(KWLoc, PrevRecordDecl->getKindName());
Diag(PrevRecordDecl->getLocation(), diag::note_previous_use);
Kind = PrevRecordDecl->getTagKind();
}
// Check for redefinition of this class template.
if (TUK == TUK_Definition) {
if (TagDecl *Def = PrevRecordDecl->getDefinition()) {
// If we have a prior definition that is not visible, treat this as
// simply making that previous definition visible.
NamedDecl *Hidden = nullptr;
if (SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
SkipBody->ShouldSkip = true;
auto *Tmpl = cast<CXXRecordDecl>(Hidden)->getDescribedClassTemplate();
assert(Tmpl && "original definition of a class template is not a "
"class template?");
makeMergedDefinitionVisible(Hidden);
makeMergedDefinitionVisible(Tmpl);
return Def;
}
Diag(NameLoc, diag::err_redefinition) << Name;
Diag(Def->getLocation(), diag::note_previous_definition);
// FIXME: Would it make sense to try to "forget" the previous
// definition, as part of error recovery?
return true;
}
}
} else if (PrevDecl) {
// C++ [temp]p5:
// A class template shall not have the same name as any other
// template, class, function, object, enumeration, enumerator,
// namespace, or type in the same scope (3.3), except as specified
// in (14.5.4).
Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
Diag(PrevDecl->getLocation(), diag::note_previous_definition);
return true;
}
// Check the template parameter list of this declaration, possibly
// merging in the template parameter list from the previous class
// template declaration. Skip this check for a friend in a dependent
// context, because the template parameter list might be dependent.
if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
CheckTemplateParameterList(
TemplateParams,
PrevClassTemplate ? PrevClassTemplate->getTemplateParameters()
: nullptr,
(SS.isSet() && SemanticContext && SemanticContext->isRecord() &&
SemanticContext->isDependentContext())
? TPC_ClassTemplateMember
: TUK == TUK_Friend ? TPC_FriendClassTemplate
: TPC_ClassTemplate))
Invalid = true;
if (SS.isSet()) {
// If the name of the template was qualified, we must be defining the
// template out-of-line.
if (!SS.isInvalid() && !Invalid && !PrevClassTemplate) {
Diag(NameLoc, TUK == TUK_Friend ? diag::err_friend_decl_does_not_match
: diag::err_member_decl_does_not_match)
<< Name << SemanticContext << /*IsDefinition*/true << SS.getRange();
Invalid = true;
}
}
// If this is a templated friend in a dependent context we should not put it
// on the redecl chain. In some cases, the templated friend can be the most
// recent declaration, tricking the template instantiator into making
// substitutions there.
// FIXME: Figure out how to combine with shouldLinkDependentDeclWithPrevious
bool ShouldAddRedecl
= !(TUK == TUK_Friend && CurContext->isDependentContext());
CXXRecordDecl *NewClass =
CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
PrevClassTemplate && ShouldAddRedecl ?
PrevClassTemplate->getTemplatedDecl() : nullptr,
/*DelayTypeCreation=*/true);
SetNestedNameSpecifier(NewClass, SS);
if (NumOuterTemplateParamLists > 0)
NewClass->setTemplateParameterListsInfo(
Context, llvm::makeArrayRef(OuterTemplateParamLists,
NumOuterTemplateParamLists));
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
if (TUK == TUK_Definition) {
AddAlignmentAttributesForRecord(NewClass);
AddMsStructLayoutForRecord(NewClass);
}
// Attach the associated constraints when the declaration will not be part of
// a decl chain.
Expr *const ACtoAttach =
PrevClassTemplate && ShouldAddRedecl ? nullptr : CurAC;
ClassTemplateDecl *NewTemplate
= ClassTemplateDecl::Create(Context, SemanticContext, NameLoc,
DeclarationName(Name), TemplateParams,
NewClass, ACtoAttach);
if (ShouldAddRedecl)
NewTemplate->setPreviousDecl(PrevClassTemplate);
NewClass->setDescribedClassTemplate(NewTemplate);
if (ModulePrivateLoc.isValid())
NewTemplate->setModulePrivate();
// Build the type for the class template declaration now.
QualType T = NewTemplate->getInjectedClassNameSpecialization();
T = Context.getInjectedClassNameType(NewClass, T);
assert(T->isDependentType() && "Class template type is not dependent?");
(void)T;
// If we are providing an explicit specialization of a member that is a
// class template, make a note of that.
if (PrevClassTemplate &&
PrevClassTemplate->getInstantiatedFromMemberTemplate())
PrevClassTemplate->setMemberSpecialization();
// Set the access specifier.
if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord())
SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
// Set the lexical context of these templates
NewClass->setLexicalDeclContext(CurContext);
NewTemplate->setLexicalDeclContext(CurContext);
if (TUK == TUK_Definition)
NewClass->startDefinition();
ProcessDeclAttributeList(S, NewClass, Attr);
if (PrevClassTemplate)
mergeDeclAttributes(NewClass, PrevClassTemplate->getTemplatedDecl());
AddPushedVisibilityAttribute(NewClass);
if (TUK != TUK_Friend) {
// Per C++ [basic.scope.temp]p2, skip the template parameter scopes.
Scope *Outer = S;
while ((Outer->getFlags() & Scope::TemplateParamScope) != 0)
Outer = Outer->getParent();
PushOnScopeChains(NewTemplate, Outer);
} else {
if (PrevClassTemplate && PrevClassTemplate->getAccess() != AS_none) {
NewTemplate->setAccess(PrevClassTemplate->getAccess());
NewClass->setAccess(PrevClassTemplate->getAccess());
}
NewTemplate->setObjectOfFriendDecl();
// Friend templates are visible in fairly strange ways.
if (!CurContext->isDependentContext()) {
DeclContext *DC = SemanticContext->getRedeclContext();
DC->makeDeclVisibleInContext(NewTemplate);
if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
PushOnScopeChains(NewTemplate, EnclosingScope,
/* AddToContext = */ false);
}
FriendDecl *Friend = FriendDecl::Create(
Context, CurContext, NewClass->getLocation(), NewTemplate, FriendLoc);
Friend->setAccess(AS_public);
CurContext->addDecl(Friend);
}
if (PrevClassTemplate)
CheckRedeclarationModuleOwnership(NewTemplate, PrevClassTemplate);
if (Invalid) {
NewTemplate->setInvalidDecl();
NewClass->setInvalidDecl();
}
ActOnDocumentableDecl(NewTemplate);
return NewTemplate;
}
namespace {
/// Tree transform to "extract" a transformed type from a class template's
/// constructor to a deduction guide.
class ExtractTypeForDeductionGuide
: public TreeTransform<ExtractTypeForDeductionGuide> {
public:
typedef TreeTransform<ExtractTypeForDeductionGuide> Base;
ExtractTypeForDeductionGuide(Sema &SemaRef) : Base(SemaRef) {}
TypeSourceInfo *transform(TypeSourceInfo *TSI) { return TransformType(TSI); }
QualType TransformTypedefType(TypeLocBuilder &TLB, TypedefTypeLoc TL) {
return TransformType(
TLB,
TL.getTypedefNameDecl()->getTypeSourceInfo()->getTypeLoc());
}
};
/// Transform to convert portions of a constructor declaration into the
/// corresponding deduction guide, per C++1z [over.match.class.deduct]p1.
struct ConvertConstructorToDeductionGuideTransform {
ConvertConstructorToDeductionGuideTransform(Sema &S,
ClassTemplateDecl *Template)
: SemaRef(S), Template(Template) {}
Sema &SemaRef;
ClassTemplateDecl *Template;
DeclContext *DC = Template->getDeclContext();
CXXRecordDecl *Primary = Template->getTemplatedDecl();
DeclarationName DeductionGuideName =
SemaRef.Context.DeclarationNames.getCXXDeductionGuideName(Template);
QualType DeducedType = SemaRef.Context.getTypeDeclType(Primary);
// Index adjustment to apply to convert depth-1 template parameters into
// depth-0 template parameters.
unsigned Depth1IndexAdjustment = Template->getTemplateParameters()->size();
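// For illustration (example not in the original source): for
//
//   template<typename T> struct S { template<typename U> S(T, U); };
//
// T has depth 0 and U has depth 1 within the constructor template. The
// implicit guide needs both at depth 0, so U is renumbered to index
// Depth1IndexAdjustment + 0 == 1, yielding the equivalent of
// 'template<typename T, typename U> S(T, U) -> S<T>;'.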
/// Transform a constructor declaration into a deduction guide.
NamedDecl *transformConstructor(FunctionTemplateDecl *FTD,
CXXConstructorDecl *CD) {
SmallVector<TemplateArgument, 16> SubstArgs;
LocalInstantiationScope Scope(SemaRef);
// C++ [over.match.class.deduct]p1:
// -- For each constructor of the class template designated by the
// template-name, a function template with the following properties:
// -- The template parameters are the template parameters of the class
// template followed by the template parameters (including default
// template arguments) of the constructor, if any.
TemplateParameterList *TemplateParams = Template->getTemplateParameters();
if (FTD) {
TemplateParameterList *InnerParams = FTD->getTemplateParameters();
SmallVector<NamedDecl *, 16> AllParams;
AllParams.reserve(TemplateParams->size() + InnerParams->size());
AllParams.insert(AllParams.begin(),
TemplateParams->begin(), TemplateParams->end());
SubstArgs.reserve(InnerParams->size());
// Later template parameters could refer to earlier ones, so build up
// a list of substituted template arguments as we go.
for (NamedDecl *Param : *InnerParams) {
MultiLevelTemplateArgumentList Args;
Args.addOuterTemplateArguments(SubstArgs);
Args.addOuterRetainedLevel();
NamedDecl *NewParam = transformTemplateParameter(Param, Args);
if (!NewParam)
return nullptr;
AllParams.push_back(NewParam);
SubstArgs.push_back(SemaRef.Context.getCanonicalTemplateArgument(
SemaRef.Context.getInjectedTemplateArg(NewParam)));
}
TemplateParams = TemplateParameterList::Create(
SemaRef.Context, InnerParams->getTemplateLoc(),
InnerParams->getLAngleLoc(), AllParams, InnerParams->getRAngleLoc(),
/*FIXME: RequiresClause*/ nullptr);
}
// If we built a new template-parameter-list, track that we need to
// substitute references to the old parameters into references to the
// new ones.
MultiLevelTemplateArgumentList Args;
if (FTD) {
Args.addOuterTemplateArguments(SubstArgs);
Args.addOuterRetainedLevel();
}
FunctionProtoTypeLoc FPTL = CD->getTypeSourceInfo()->getTypeLoc()
.getAsAdjusted<FunctionProtoTypeLoc>();
assert(FPTL && "no prototype for constructor declaration");
// Transform the type of the function, adjusting the return type and
// replacing references to the old parameters with references to the
// new ones.
TypeLocBuilder TLB;
SmallVector<ParmVarDecl*, 8> Params;
QualType NewType = transformFunctionProtoType(TLB, FPTL, Params, Args);
if (NewType.isNull())
return nullptr;
TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);
return buildDeductionGuide(TemplateParams, CD->isExplicit(), NewTInfo,
CD->getLocStart(), CD->getLocation(),
CD->getLocEnd());
}
/// Build a deduction guide with the specified parameter types.
NamedDecl *buildSimpleDeductionGuide(MutableArrayRef<QualType> ParamTypes) {
SourceLocation Loc = Template->getLocation();
// Build the requested type.
FunctionProtoType::ExtProtoInfo EPI;
EPI.HasTrailingReturn = true;
QualType Result = SemaRef.BuildFunctionType(DeducedType, ParamTypes, Loc,
DeductionGuideName, EPI);
TypeSourceInfo *TSI = SemaRef.Context.getTrivialTypeSourceInfo(Result, Loc);
FunctionProtoTypeLoc FPTL =
TSI->getTypeLoc().castAs<FunctionProtoTypeLoc>();
// Build the parameters, needed during deduction / substitution.
SmallVector<ParmVarDecl*, 4> Params;
for (auto T : ParamTypes) {
ParmVarDecl *NewParam = ParmVarDecl::Create(
SemaRef.Context, DC, Loc, Loc, nullptr, T,
SemaRef.Context.getTrivialTypeSourceInfo(T, Loc), SC_None, nullptr);
NewParam->setScopeInfo(0, Params.size());
FPTL.setParam(Params.size(), NewParam);
Params.push_back(NewParam);
}
return buildDeductionGuide(Template->getTemplateParameters(), false, TSI,
Loc, Loc, Loc);
}
private:
/// Transform a constructor template parameter into a deduction guide template
/// parameter, rebuilding any internal references to earlier parameters and
/// renumbering as we go.
NamedDecl *transformTemplateParameter(NamedDecl *TemplateParam,
MultiLevelTemplateArgumentList &Args) {
if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(TemplateParam)) {
// TemplateTypeParmDecl's index cannot be changed after creation, so
// substitute it directly.
auto *NewTTP = TemplateTypeParmDecl::Create(
SemaRef.Context, DC, TTP->getLocStart(), TTP->getLocation(),
/*Depth*/0, Depth1IndexAdjustment + TTP->getIndex(),
TTP->getIdentifier(), TTP->wasDeclaredWithTypename(),
TTP->isParameterPack());
if (TTP->hasDefaultArgument()) {
TypeSourceInfo *InstantiatedDefaultArg =
SemaRef.SubstType(TTP->getDefaultArgumentInfo(), Args,
TTP->getDefaultArgumentLoc(), TTP->getDeclName());
if (InstantiatedDefaultArg)
NewTTP->setDefaultArgument(InstantiatedDefaultArg);
}
SemaRef.CurrentInstantiationScope->InstantiatedLocal(TemplateParam,
NewTTP);
return NewTTP;
}
if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(TemplateParam))
return transformTemplateParameterImpl(TTP, Args);
return transformTemplateParameterImpl(
cast<NonTypeTemplateParmDecl>(TemplateParam), Args);
}
template<typename TemplateParmDecl>
TemplateParmDecl *
transformTemplateParameterImpl(TemplateParmDecl *OldParam,
MultiLevelTemplateArgumentList &Args) {
// Ask the template instantiator to do the heavy lifting for us, then adjust
// the index of the parameter once it's done.
auto *NewParam =
cast_or_null<TemplateParmDecl>(SemaRef.SubstDecl(OldParam, DC, Args));
assert(NewParam->getDepth() == 0 && "unexpected template param depth");
NewParam->setPosition(NewParam->getPosition() + Depth1IndexAdjustment);
return NewParam;
}
QualType transformFunctionProtoType(TypeLocBuilder &TLB,
FunctionProtoTypeLoc TL,
SmallVectorImpl<ParmVarDecl*> &Params,
MultiLevelTemplateArgumentList &Args) {
SmallVector<QualType, 4> ParamTypes;
const FunctionProtoType *T = TL.getTypePtr();
// -- The types of the function parameters are those of the constructor.
for (auto *OldParam : TL.getParams()) {
ParmVarDecl *NewParam = transformFunctionTypeParam(OldParam, Args);
if (!NewParam)
return QualType();
ParamTypes.push_back(NewParam->getType());
Params.push_back(NewParam);
}
// -- The return type is the class template specialization designated by
// the template-name and template arguments corresponding to the
// template parameters obtained from the class template.
//
// We use the injected-class-name type of the primary template instead.
// This has the convenient property that it is different from any type that
// the user can write in a deduction-guide (because they cannot enter the
// context of the template), so implicit deduction guides can never collide
// with explicit ones.
QualType ReturnType = DeducedType;
TLB.pushTypeSpec(ReturnType).setNameLoc(Primary->getLocation());
// Resolving a wording defect, we also inherit the variadicness of the
// constructor.
FunctionProtoType::ExtProtoInfo EPI;
EPI.Variadic = T->isVariadic();
EPI.HasTrailingReturn = true;
QualType Result = SemaRef.BuildFunctionType(
ReturnType, ParamTypes, TL.getLocStart(), DeductionGuideName, EPI);
if (Result.isNull())
return QualType();
FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
NewTL.setLParenLoc(TL.getLParenLoc());
NewTL.setRParenLoc(TL.getRParenLoc());
NewTL.setExceptionSpecRange(SourceRange());
NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
for (unsigned I = 0, E = NewTL.getNumParams(); I != E; ++I)
NewTL.setParam(I, Params[I]);
return Result;
}
ParmVarDecl *
transformFunctionTypeParam(ParmVarDecl *OldParam,
MultiLevelTemplateArgumentList &Args) {
TypeSourceInfo *OldDI = OldParam->getTypeSourceInfo();
TypeSourceInfo *NewDI;
if (auto PackTL = OldDI->getTypeLoc().getAs<PackExpansionTypeLoc>()) {
// Expand out the one and only element in each inner pack.
Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, 0);
NewDI =
SemaRef.SubstType(PackTL.getPatternLoc(), Args,
OldParam->getLocation(), OldParam->getDeclName());
if (!NewDI) return nullptr;
NewDI =
SemaRef.CheckPackExpansion(NewDI, PackTL.getEllipsisLoc(),
PackTL.getTypePtr()->getNumExpansions());
} else
NewDI = SemaRef.SubstType(OldDI, Args, OldParam->getLocation(),
OldParam->getDeclName());
if (!NewDI)
return nullptr;
// Extract the type. This (for instance) replaces references to typedef
// members of the current instantiation with the definitions of those
// typedefs, avoiding triggering instantiation of the deduced type during
// deduction.
NewDI = ExtractTypeForDeductionGuide(SemaRef).transform(NewDI);
// Resolving a wording defect, we also inherit default arguments from the
// constructor.
ExprResult NewDefArg;
if (OldParam->hasDefaultArg()) {
NewDefArg = SemaRef.SubstExpr(OldParam->getDefaultArg(), Args);
if (NewDefArg.isInvalid())
return nullptr;
}
ParmVarDecl *NewParam = ParmVarDecl::Create(SemaRef.Context, DC,
OldParam->getInnerLocStart(),
OldParam->getLocation(),
OldParam->getIdentifier(),
NewDI->getType(),
NewDI,
OldParam->getStorageClass(),
NewDefArg.get());
NewParam->setScopeInfo(OldParam->getFunctionScopeDepth(),
OldParam->getFunctionScopeIndex());
SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldParam, NewParam);
return NewParam;
}
NamedDecl *buildDeductionGuide(TemplateParameterList *TemplateParams,
bool Explicit, TypeSourceInfo *TInfo,
SourceLocation LocStart, SourceLocation Loc,
SourceLocation LocEnd) {
DeclarationNameInfo Name(DeductionGuideName, Loc);
ArrayRef<ParmVarDecl *> Params =
TInfo->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams();
// Build the implicit deduction guide template.
auto *Guide =
CXXDeductionGuideDecl::Create(SemaRef.Context, DC, LocStart, Explicit,
Name, TInfo->getType(), TInfo, LocEnd);
Guide->setImplicit();
Guide->setParams(Params);
for (auto *Param : Params)
Param->setDeclContext(Guide);
auto *GuideTemplate = FunctionTemplateDecl::Create(
SemaRef.Context, DC, Loc, DeductionGuideName, TemplateParams, Guide);
GuideTemplate->setImplicit();
Guide->setDescribedFunctionTemplate(GuideTemplate);
if (isa<CXXRecordDecl>(DC)) {
Guide->setAccess(AS_public);
GuideTemplate->setAccess(AS_public);
}
DC->addDecl(GuideTemplate);
return GuideTemplate;
}
};
}
void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc) {
DeclContext *DC = Template->getDeclContext();
if (DC->isDependentContext())
return;
ConvertConstructorToDeductionGuideTransform Transform(
*this, cast<ClassTemplateDecl>(Template));
if (!isCompleteType(Loc, Transform.DeducedType))
return;
// Check whether we've already declared deduction guides for this template.
// FIXME: Consider storing a flag on the template to indicate this.
auto Existing = DC->lookup(Transform.DeductionGuideName);
for (auto *D : Existing)
if (D->isImplicit())
return;
// In case we were expanding a pack when we attempted to declare deduction
// guides, turn off pack expansion for everything we're about to do.
ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
// Create a template instantiation record to track the "instantiation" of
// constructors into deduction guides.
// FIXME: Add a kind for this to give more meaningful diagnostics. But can
// this substitution process actually fail?
InstantiatingTemplate BuildingDeductionGuides(*this, Loc, Template);
if (BuildingDeductionGuides.isInvalid())
return;
// Convert declared constructors into deduction guide templates.
// FIXME: Skip constructors for which deduction must necessarily fail (those
// for which some class template parameter without a default argument never
// appears in a deduced context).
bool AddedAny = false;
for (NamedDecl *D : LookupConstructors(Transform.Primary)) {
D = D->getUnderlyingDecl();
if (D->isInvalidDecl() || D->isImplicit())
continue;
D = cast<NamedDecl>(D->getCanonicalDecl());
auto *FTD = dyn_cast<FunctionTemplateDecl>(D);
auto *CD =
dyn_cast_or_null<CXXConstructorDecl>(FTD ? FTD->getTemplatedDecl() : D);
// Class-scope explicit specializations (MS extension) do not result in
// deduction guides.
if (!CD || (!FTD && CD->isFunctionTemplateSpecialization()))
continue;
Transform.transformConstructor(FTD, CD);
AddedAny = true;
}
// C++17 [over.match.class.deduct]
// -- If C is not defined or does not declare any constructors, an
// additional function template derived as above from a hypothetical
// constructor C().
if (!AddedAny)
Transform.buildSimpleDeductionGuide(None);
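// For illustration (example not in the original source): for a defined
// 'template<typename T> struct S { };' with no declared constructors, this
// produces the equivalent of 'template<typename T> S() -> S<T>;'.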
// -- An additional function template derived as above from a hypothetical
// constructor C(C), called the copy deduction candidate.
cast<CXXDeductionGuideDecl>(
cast<FunctionTemplateDecl>(
Transform.buildSimpleDeductionGuide(Transform.DeducedType))
->getTemplatedDecl())
->setIsCopyDeductionCandidate();
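// For illustration (example not in the original source): for
// 'template<typename T> struct S { };' the copy deduction candidate is the
// equivalent of 'template<typename T> S(S<T>) -> S<T>;'.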
}
/// Diagnose the presence of a default template argument on a
/// template parameter, which is ill-formed in certain contexts.
///
/// \returns true if the default template argument should be dropped.
static bool DiagnoseDefaultTemplateArgument(Sema &S,
Sema::TemplateParamListContext TPC,
SourceLocation ParamLoc,
SourceRange DefArgRange) {
switch (TPC) {
case Sema::TPC_ClassTemplate:
case Sema::TPC_VarTemplate:
case Sema::TPC_TypeAliasTemplate:
return false;
case Sema::TPC_FunctionTemplate:
case Sema::TPC_FriendFunctionTemplateDefinition:
// C++ [temp.param]p9:
// A default template-argument shall not be specified in a
// function template declaration or a function template
// definition [...]
// If a friend function template declaration specifies a default
// template-argument, that declaration shall be a definition and shall be
// the only declaration of the function template in the translation unit.
// (C++98/03 doesn't have this wording; see DR226).
S.Diag(ParamLoc, S.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_template_parameter_default_in_function_template
: diag::ext_template_parameter_default_in_function_template)
<< DefArgRange;
return false;
case Sema::TPC_ClassTemplateMember:
// C++0x [temp.param]p9:
// A default template-argument shall not be specified in the
// template-parameter-lists of the definition of a member of a
// class template that appears outside of the member's class.
S.Diag(ParamLoc, diag::err_template_parameter_default_template_member)
<< DefArgRange;
return true;
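// For illustration (example not in the original source):
//
//   template<typename T> struct A { template<typename U> void f(); };
//   template<typename T> template<typename U = int> // error
//   void A<T>::f() {}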
case Sema::TPC_FriendClassTemplate:
case Sema::TPC_FriendFunctionTemplate:
// C++ [temp.param]p9:
// A default template-argument shall not be specified in a
// friend template declaration.
S.Diag(ParamLoc, diag::err_template_parameter_default_friend_template)
<< DefArgRange;
return true;
// FIXME: C++0x [temp.param]p9 allows default template-arguments
// for friend function templates if there is only a single
// declaration (and it is a definition). Strange!
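// For illustration (example not in the original source):
//
//   struct A {
//     template<typename U = int> friend class B; // error
//   };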
}
llvm_unreachable("Invalid TemplateParamListContext!");
}
/// Check for unexpanded parameter packs within the template parameters
/// of a template template parameter, recursively.
static bool DiagnoseUnexpandedParameterPacks(Sema &S,
TemplateTemplateParmDecl *TTP) {
// A template template parameter which is a parameter pack is also a pack
// expansion.
if (TTP->isParameterPack())
return false;
TemplateParameterList *Params = TTP->getTemplateParameters();
for (unsigned I = 0, N = Params->size(); I != N; ++I) {
NamedDecl *P = Params->getParam(I);
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
if (!NTTP->isParameterPack() &&
S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(),
NTTP->getTypeSourceInfo(),
Sema::UPPC_NonTypeTemplateParameterType))
return true;
continue;
}
if (TemplateTemplateParmDecl *InnerTTP
= dyn_cast<TemplateTemplateParmDecl>(P))
if (DiagnoseUnexpandedParameterPacks(S, InnerTTP))
return true;
}
return false;
}
/// Checks the validity of a template parameter list, possibly
/// considering the template parameter list from a previous
/// declaration.
///
/// If an "old" template parameter list is provided, it must be
/// equivalent (per TemplateParameterListsAreEqual) to the "new"
/// template parameter list.
///
/// \param NewParams Template parameter list for a new template
/// declaration. This template parameter list will be updated with any
/// default arguments that are carried through from the previous
/// template parameter list.
///
/// \param OldParams If provided, template parameter list from a
/// previous declaration of the same template. Default template
/// arguments will be merged from the old template parameter list to
/// the new template parameter list.
///
/// \param TPC Describes the context in which we are checking the given
/// template parameter list.
///
/// \returns true if an error occurred, false otherwise.
bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC) {
bool Invalid = false;
// C++ [temp.param]p10:
// The set of default template-arguments available for use with a
// template declaration or definition is obtained by merging the
// default arguments from the definition (if in scope) and all
// declarations in scope in the same way default function
// arguments are (8.3.6).
bool SawDefaultArgument = false;
SourceLocation PreviousDefaultArgLoc;
// Dummy initialization to avoid warnings.
TemplateParameterList::iterator OldParam = NewParams->end();
if (OldParams)
OldParam = OldParams->begin();
bool RemoveDefaultArguments = false;
for (TemplateParameterList::iterator NewParam = NewParams->begin(),
NewParamEnd = NewParams->end();
NewParam != NewParamEnd; ++NewParam) {
// Variables used to diagnose redundant default arguments
bool RedundantDefaultArg = false;
SourceLocation OldDefaultLoc;
SourceLocation NewDefaultLoc;
// Variable used to diagnose missing default arguments
bool MissingDefaultArg = false;
// Variable used to diagnose non-final parameter packs
bool SawParameterPack = false;
if (TemplateTypeParmDecl *NewTypeParm
= dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
// Check the presence of a default argument here.
if (NewTypeParm->hasDefaultArgument() &&
DiagnoseDefaultTemplateArgument(*this, TPC,
NewTypeParm->getLocation(),
NewTypeParm->getDefaultArgumentInfo()->getTypeLoc()
.getSourceRange()))
NewTypeParm->removeDefaultArgument();
// Merge default arguments for template type parameters.
TemplateTypeParmDecl *OldTypeParm
= OldParams? cast<TemplateTypeParmDecl>(*OldParam) : nullptr;
if (NewTypeParm->isParameterPack()) {
assert(!NewTypeParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
SawParameterPack = true;
} else if (OldTypeParm && hasVisibleDefaultArgument(OldTypeParm) &&
NewTypeParm->hasDefaultArgument()) {
OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
RedundantDefaultArg = true;
PreviousDefaultArgLoc = NewDefaultLoc;
} else if (OldTypeParm && OldTypeParm->hasDefaultArgument()) {
// Merge the default argument from the old declaration to the
// new declaration.
NewTypeParm->setInheritedDefaultArgument(Context, OldTypeParm);
PreviousDefaultArgLoc = OldTypeParm->getDefaultArgumentLoc();
} else if (NewTypeParm->hasDefaultArgument()) {
SawDefaultArgument = true;
PreviousDefaultArgLoc = NewTypeParm->getDefaultArgumentLoc();
} else if (SawDefaultArgument)
MissingDefaultArg = true;
} else if (NonTypeTemplateParmDecl *NewNonTypeParm
= dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) {
// Check for unexpanded parameter packs.
if (!NewNonTypeParm->isParameterPack() &&
DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(),
NewNonTypeParm->getTypeSourceInfo(),
UPPC_NonTypeTemplateParameterType)) {
Invalid = true;
continue;
}
// Check the presence of a default argument here.
if (NewNonTypeParm->hasDefaultArgument() &&
DiagnoseDefaultTemplateArgument(*this, TPC,
NewNonTypeParm->getLocation(),
NewNonTypeParm->getDefaultArgument()->getSourceRange())) {
NewNonTypeParm->removeDefaultArgument();
}
// Merge default arguments for non-type template parameters
NonTypeTemplateParmDecl *OldNonTypeParm
= OldParams? cast<NonTypeTemplateParmDecl>(*OldParam) : nullptr;
if (NewNonTypeParm->isParameterPack()) {
assert(!NewNonTypeParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
if (!NewNonTypeParm->isPackExpansion())
SawParameterPack = true;
} else if (OldNonTypeParm && hasVisibleDefaultArgument(OldNonTypeParm) &&
NewNonTypeParm->hasDefaultArgument()) {
OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
SawDefaultArgument = true;
RedundantDefaultArg = true;
PreviousDefaultArgLoc = NewDefaultLoc;
} else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument()) {
// Merge the default argument from the old declaration to the
// new declaration.
NewNonTypeParm->setInheritedDefaultArgument(Context, OldNonTypeParm);
PreviousDefaultArgLoc = OldNonTypeParm->getDefaultArgumentLoc();
} else if (NewNonTypeParm->hasDefaultArgument()) {
SawDefaultArgument = true;
PreviousDefaultArgLoc = NewNonTypeParm->getDefaultArgumentLoc();
} else if (SawDefaultArgument)
MissingDefaultArg = true;
} else {
TemplateTemplateParmDecl *NewTemplateParm
= cast<TemplateTemplateParmDecl>(*NewParam);
// Check for unexpanded parameter packs, recursively.
if (::DiagnoseUnexpandedParameterPacks(*this, NewTemplateParm)) {
Invalid = true;
continue;
}
// Check the presence of a default argument here.
if (NewTemplateParm->hasDefaultArgument() &&
DiagnoseDefaultTemplateArgument(*this, TPC,
NewTemplateParm->getLocation(),
NewTemplateParm->getDefaultArgument().getSourceRange()))
NewTemplateParm->removeDefaultArgument();
// Merge default arguments for template template parameters
TemplateTemplateParmDecl *OldTemplateParm
= OldParams? cast<TemplateTemplateParmDecl>(*OldParam) : nullptr;
if (NewTemplateParm->isParameterPack()) {
assert(!NewTemplateParm->hasDefaultArgument() &&
"Parameter packs can't have a default argument!");
if (!NewTemplateParm->isPackExpansion())
SawParameterPack = true;
} else if (OldTemplateParm &&
hasVisibleDefaultArgument(OldTemplateParm) &&
NewTemplateParm->hasDefaultArgument()) {
OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation();
SawDefaultArgument = true;
RedundantDefaultArg = true;
PreviousDefaultArgLoc = NewDefaultLoc;
} else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument()) {
// Merge the default argument from the old declaration to the
// new declaration.
NewTemplateParm->setInheritedDefaultArgument(Context, OldTemplateParm);
PreviousDefaultArgLoc
= OldTemplateParm->getDefaultArgument().getLocation();
} else if (NewTemplateParm->hasDefaultArgument()) {
SawDefaultArgument = true;
PreviousDefaultArgLoc
= NewTemplateParm->getDefaultArgument().getLocation();
} else if (SawDefaultArgument)
MissingDefaultArg = true;
}
// C++11 [temp.param]p11:
// If a template parameter of a primary class template or alias template
// is a template parameter pack, it shall be the last template parameter.
if (SawParameterPack && (NewParam + 1) != NewParamEnd &&
(TPC == TPC_ClassTemplate || TPC == TPC_VarTemplate ||
TPC == TPC_TypeAliasTemplate)) {
Diag((*NewParam)->getLocation(),
diag::err_template_param_pack_must_be_last_template_parameter);
Invalid = true;
}
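// For illustration (example not in the original source):
//
//   template<typename ...Ts, typename U> struct X; // error: pack must be
//                                                  // last here
//   template<typename ...Ts, typename U> void f(); // OK: function template,
//                                                  // U deduced or specified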
if (RedundantDefaultArg) {
// C++ [temp.param]p12:
// A template-parameter shall not be given default arguments
// by two different declarations in the same scope.
Diag(NewDefaultLoc, diag::err_template_param_default_arg_redefinition);
Diag(OldDefaultLoc, diag::note_template_param_prev_default_arg);
Invalid = true;
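// For illustration (example not in the original source):
//
//   template<typename T = int> struct X;
//   template<typename T = int> struct X; // error: default redefined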
} else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) {
// C++ [temp.param]p11:
// If a template-parameter of a class template has a default
// template-argument, each subsequent template-parameter shall either
// have a default template-argument supplied or be a template parameter
// pack.
Diag((*NewParam)->getLocation(),
diag::err_template_param_default_arg_missing);
Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg);
Invalid = true;
RemoveDefaultArguments = true;
}
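// For illustration (example not in the original source):
//
//   template<typename T = int, typename U> struct X; // error: U needs a
//                                                    // default argument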
// If we have an old template parameter list that we're merging
// in, move on to the next parameter.
if (OldParams)
++OldParam;
}
// We were missing some default arguments at the end of the list, so remove
// all of the default arguments.
if (RemoveDefaultArguments) {
for (TemplateParameterList::iterator NewParam = NewParams->begin(),
NewParamEnd = NewParams->end();
NewParam != NewParamEnd; ++NewParam) {
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*NewParam))
TTP->removeDefaultArgument();
else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*NewParam))
NTTP->removeDefaultArgument();
else
cast<TemplateTemplateParmDecl>(*NewParam)->removeDefaultArgument();
}
}
return Invalid;
}
namespace {
/// A class which looks for a use of a certain level of template
/// parameter.
struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> {
typedef RecursiveASTVisitor<DependencyChecker> super;
unsigned Depth;
// Whether we're looking for a use of a template parameter that makes the
// overall construct type-dependent / a dependent type. This is strictly
// best-effort for now; we may fail to match at all for a dependent type
// in some cases if this is set.
bool IgnoreNonTypeDependent;
bool Match;
SourceLocation MatchLoc;
DependencyChecker(unsigned Depth, bool IgnoreNonTypeDependent)
: Depth(Depth), IgnoreNonTypeDependent(IgnoreNonTypeDependent),
Match(false) {}
DependencyChecker(TemplateParameterList *Params, bool IgnoreNonTypeDependent)
: IgnoreNonTypeDependent(IgnoreNonTypeDependent), Match(false) {
NamedDecl *ND = Params->getParam(0);
if (TemplateTypeParmDecl *PD = dyn_cast<TemplateTypeParmDecl>(ND)) {
Depth = PD->getDepth();
} else if (NonTypeTemplateParmDecl *PD =
dyn_cast<NonTypeTemplateParmDecl>(ND)) {
Depth = PD->getDepth();
} else {
Depth = cast<TemplateTemplateParmDecl>(ND)->getDepth();
}
}
bool Matches(unsigned ParmDepth, SourceLocation Loc = SourceLocation()) {
if (ParmDepth >= Depth) {
Match = true;
MatchLoc = Loc;
return true;
}
return false;
}
bool TraverseStmt(Stmt *S, DataRecursionQueue *Q = nullptr) {
// Prune out non-type-dependent expressions if requested. This can
// sometimes result in us failing to find a template parameter reference
// (if a value-dependent expression creates a dependent type), but this
// mode is best-effort only.
if (auto *E = dyn_cast_or_null<Expr>(S))
if (IgnoreNonTypeDependent && !E->isTypeDependent())
return true;
return super::TraverseStmt(S, Q);
}
bool TraverseTypeLoc(TypeLoc TL) {
if (IgnoreNonTypeDependent && !TL.isNull() &&
!TL.getType()->isDependentType())
return true;
return super::TraverseTypeLoc(TL);
}
bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
return !Matches(TL.getTypePtr()->getDepth(), TL.getNameLoc());
}
bool VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
// For a best-effort search, keep looking until we find a location.
return IgnoreNonTypeDependent || !Matches(T->getDepth());
}
bool TraverseTemplateName(TemplateName N) {
if (TemplateTemplateParmDecl *PD =
dyn_cast_or_null<TemplateTemplateParmDecl>(N.getAsTemplateDecl()))
if (Matches(PD->getDepth()))
return false;
return super::TraverseTemplateName(N);
}
bool VisitDeclRefExpr(DeclRefExpr *E) {
if (NonTypeTemplateParmDecl *PD =
dyn_cast<NonTypeTemplateParmDecl>(E->getDecl()))
if (Matches(PD->getDepth(), E->getExprLoc()))
return false;
return super::VisitDeclRefExpr(E);
}
bool VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
return TraverseType(T->getReplacementType());
}
bool
VisitSubstTemplateTypeParmPackType(const SubstTemplateTypeParmPackType *T) {
return TraverseTemplateArgument(T->getArgumentPack());
}
bool TraverseInjectedClassNameType(const InjectedClassNameType *T) {
return TraverseType(T->getInjectedSpecializationType());
}
};
} // end anonymous namespace
/// Determines whether a given type depends on the given parameter
/// list.
static bool
DependsOnTemplateParameters(QualType T, TemplateParameterList *Params) {
DependencyChecker Checker(Params, /*IgnoreNonTypeDependent*/false);
Checker.TraverseType(T);
return Checker.Match;
}
// Find the source range corresponding to the named type in the given
// nested-name-specifier, if any.
static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
QualType T,
const CXXScopeSpec &SS) {
NestedNameSpecifierLoc NNSLoc(SS.getScopeRep(), SS.location_data());
while (NestedNameSpecifier *NNS = NNSLoc.getNestedNameSpecifier()) {
if (const Type *CurType = NNS->getAsType()) {
if (Context.hasSameUnqualifiedType(T, QualType(CurType, 0)))
return NNSLoc.getTypeLoc().getSourceRange();
} else
break;
NNSLoc = NNSLoc.getPrefix();
}
return SourceRange();
}
/// Match the given template parameter lists to the given scope
/// specifier, returning the template parameter list that applies to the
/// name.
///
/// \param DeclStartLoc the start of the declaration that has a scope
/// specifier or a template parameter list.
///
/// \param DeclLoc The location of the declaration itself.
///
/// \param SS the scope specifier that will be matched to the given template
/// parameter lists. This scope specifier precedes a qualified name that is
/// being declared.
///
/// \param TemplateId The template-id following the scope specifier, if there
/// is one. Used to check for a missing 'template<>'.
///
/// \param ParamLists the template parameter lists, from the outermost to the
/// innermost template parameter lists.
///
/// \param IsFriend Whether to apply the slightly different rules for
/// matching template parameters to scope specifiers in friend
/// declarations.
///
/// \param IsMemberSpecialization will be set true if the scope specifier
/// denotes a fully-specialized type, and therefore this is a declaration of
/// a member specialization.
///
/// \returns the template parameter list, if any, that corresponds to the
/// name that is preceded by the scope specifier @p SS. This template
/// parameter list may have template parameters (if we're declaring a
/// template) or may have no template parameters (if we're declaring a
/// template specialization), or may be NULL (if what we're declaring isn't
/// itself a template).
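/// For illustration (example not in the original source): given
///
///   template<typename T> struct A { template<typename U> struct B; };
///   template<typename T> template<typename U> struct A<T>::B { };
///
/// the first list is matched to the scope 'A<T>::' and the second list,
/// for 'U', is returned as the one that applies to the declared name 'B'.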
TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS,
TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
bool &IsMemberSpecialization, bool &Invalid) {
IsMemberSpecialization = false;
Invalid = false;
// The sequence of nested types to which we will match up the template
// parameter lists. We first build this list by starting with the type named
// by the nested-name-specifier and walking out until we run out of types.
SmallVector<QualType, 4> NestedTypes;
QualType T;
if (SS.getScopeRep()) {
if (CXXRecordDecl *Record
= dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
T = Context.getTypeDeclType(Record);
else
T = QualType(SS.getScopeRep()->getAsType(), 0);
}
// If we found an explicit specialization that prevents us from needing
// 'template<>' headers, this will be set to the location of that
// explicit specialization.
SourceLocation ExplicitSpecLoc;
while (!T.isNull()) {
NestedTypes.push_back(T);
// Retrieve the parent of a record type.
if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
// If this type is an explicit specialization, we're done.
if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
if (!isa<ClassTemplatePartialSpecializationDecl>(Spec) &&
Spec->getSpecializationKind() == TSK_ExplicitSpecialization) {
ExplicitSpecLoc = Spec->getLocation();
break;
}
} else if (Record->getTemplateSpecializationKind()
== TSK_ExplicitSpecialization) {
ExplicitSpecLoc = Record->getLocation();
break;
}
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Record->getParent()))
T = Context.getTypeDeclType(Parent);
else
T = QualType();
continue;
}
if (const TemplateSpecializationType *TST
= T->getAs<TemplateSpecializationType>()) {
if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Template->getDeclContext()))
T = Context.getTypeDeclType(Parent);
else
T = QualType();
continue;
}
}
// Look one step prior in a dependent template specialization type.
if (const DependentTemplateSpecializationType *DependentTST
= T->getAs<DependentTemplateSpecializationType>()) {
if (NestedNameSpecifier *NNS = DependentTST->getQualifier())
T = QualType(NNS->getAsType(), 0);
else
T = QualType();
continue;
}
// Look one step prior in a dependent name type.
if (const DependentNameType *DependentName = T->getAs<DependentNameType>()){
if (NestedNameSpecifier *NNS = DependentName->getQualifier())
T = QualType(NNS->getAsType(), 0);
else
T = QualType();
continue;
}
// Retrieve the parent of an enumeration type.
if (const EnumType *EnumT = T->getAs<EnumType>()) {
// FIXME: Forward-declared enums require a TSK_ExplicitSpecialization
// check here.
EnumDecl *Enum = EnumT->getDecl();
// Get to the parent type.
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Enum->getParent()))
T = Context.getTypeDeclType(Parent);
else
T = QualType();
continue;
}
T = QualType();
}
// Reverse the nested types list, since we want to traverse from the outermost
// to the innermost while checking template-parameter-lists.
std::reverse(NestedTypes.begin(), NestedTypes.end());
// C++0x [temp.expl.spec]p17:
// A member or a member template may be nested within many
// enclosing class templates. In an explicit specialization for
// such a member, the member declaration shall be preceded by a
// template<> for each enclosing class template that is
// explicitly specialized.
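// For illustration (example not in the original source):
//
//   template<typename T> struct A { template<typename U> struct B; };
//   template<> template<typename U> // one 'template<>' per enclosing
//   struct A<int>::B { };           // class template being specialized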
bool SawNonEmptyTemplateParameterList = false;
auto CheckExplicitSpecialization = [&](SourceRange Range, bool Recovery) {
if (SawNonEmptyTemplateParameterList) {
Diag(DeclLoc, diag::err_specialize_member_of_template)
<< !Recovery << Range;
Invalid = true;
IsMemberSpecialization = false;
return true;
}
return false;
};
auto DiagnoseMissingExplicitSpecialization = [&] (SourceRange Range) {
// Check that we can have an explicit specialization here.
if (CheckExplicitSpecialization(Range, true))
return true;
// We don't have a template header, but we should.
SourceLocation ExpectedTemplateLoc;
if (!ParamLists.empty())
ExpectedTemplateLoc = ParamLists[0]->getTemplateLoc();
else
ExpectedTemplateLoc = DeclStartLoc;
Diag(DeclLoc, diag::err_template_spec_needs_header)
<< Range
<< FixItHint::CreateInsertion(ExpectedTemplateLoc, "template<> ");
return false;
};
unsigned ParamIdx = 0;
for (unsigned TypeIdx = 0, NumTypes = NestedTypes.size(); TypeIdx != NumTypes;
++TypeIdx) {
T = NestedTypes[TypeIdx];
// Whether we expect a 'template<>' header.
bool NeedEmptyTemplateHeader = false;
// Whether we expect a template header with parameters.
bool NeedNonemptyTemplateHeader = false;
// For a dependent type, the set of template parameters that we
// expect to see.
TemplateParameterList *ExpectedTemplateParams = nullptr;
// C++0x [temp.expl.spec]p15:
// A member or a member template may be nested within many enclosing
// class templates. In an explicit specialization for such a member, the
// member declaration shall be preceded by a template<> for each
// enclosing class template that is explicitly specialized.
if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
if (ClassTemplatePartialSpecializationDecl *Partial
= dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) {
ExpectedTemplateParams = Partial->getTemplateParameters();
NeedNonemptyTemplateHeader = true;
} else if (Record->isDependentType()) {
if (Record->getDescribedClassTemplate()) {
ExpectedTemplateParams = Record->getDescribedClassTemplate()
->getTemplateParameters();
NeedNonemptyTemplateHeader = true;
}
} else if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
// C++0x [temp.expl.spec]p4:
// Members of an explicitly specialized class template are defined
// in the same manner as members of normal classes, and not using
// the template<> syntax.
if (Spec->getSpecializationKind() != TSK_ExplicitSpecialization)
NeedEmptyTemplateHeader = true;
else
continue;
} else if (Record->getTemplateSpecializationKind()) {
if (Record->getTemplateSpecializationKind()
!= TSK_ExplicitSpecialization &&
TypeIdx == NumTypes - 1)
IsMemberSpecialization = true;
continue;
}
} else if (const TemplateSpecializationType *TST
= T->getAs<TemplateSpecializationType>()) {
if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
ExpectedTemplateParams = Template->getTemplateParameters();
NeedNonemptyTemplateHeader = true;
}
} else if (T->getAs<DependentTemplateSpecializationType>()) {
// FIXME: We actually could/should check the template arguments here
// against the corresponding template parameter list.
NeedNonemptyTemplateHeader = false;
}
// C++ [temp.expl.spec]p16:
// In an explicit specialization declaration for a member of a class
// template or a member template that appears in namespace scope, the
// member template and some of its enclosing class templates may remain
// unspecialized, except that the declaration shall not explicitly
// specialize a class member template if its enclosing class templates
// are not explicitly specialized as well.
if (ParamIdx < ParamLists.size()) {
if (ParamLists[ParamIdx]->size() == 0) {
if (CheckExplicitSpecialization(ParamLists[ParamIdx]->getSourceRange(),
false))
return nullptr;
} else
SawNonEmptyTemplateParameterList = true;
}
if (NeedEmptyTemplateHeader) {
// If we're on the last of the types, and we need a 'template<>' header
// here, then it's a member specialization.
if (TypeIdx == NumTypes - 1)
IsMemberSpecialization = true;
if (ParamIdx < ParamLists.size()) {
if (ParamLists[ParamIdx]->size() > 0) {
// The header has template parameters when it shouldn't. Complain.
Diag(ParamLists[ParamIdx]->getTemplateLoc(),
diag::err_template_param_list_matches_nontemplate)
<< T
<< SourceRange(ParamLists[ParamIdx]->getLAngleLoc(),
ParamLists[ParamIdx]->getRAngleLoc())
<< getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
Invalid = true;
return nullptr;
}
// Consume this template header.
++ParamIdx;
continue;
}
if (!IsFriend)
if (DiagnoseMissingExplicitSpecialization(
getRangeOfTypeInNestedNameSpecifier(Context, T, SS)))
return nullptr;
continue;
}
if (NeedNonemptyTemplateHeader) {
// In friend declarations we can have template-ids which don't
// depend on the corresponding template parameter lists. But
// assume that empty parameter lists are supposed to match this
// template-id.
if (IsFriend && T->isDependentType()) {
if (ParamIdx < ParamLists.size() &&
DependsOnTemplateParameters(T, ParamLists[ParamIdx]))
ExpectedTemplateParams = nullptr;
else
continue;
}
if (ParamIdx < ParamLists.size()) {
// Check the template parameter list, if we can.
if (ExpectedTemplateParams &&
!TemplateParameterListsAreEqual(ParamLists[ParamIdx],
ExpectedTemplateParams,
true, TPL_TemplateMatch))
Invalid = true;
if (!Invalid &&
CheckTemplateParameterList(ParamLists[ParamIdx], nullptr,
TPC_ClassTemplateMember))
Invalid = true;
++ParamIdx;
continue;
}
Diag(DeclLoc, diag::err_template_spec_needs_template_parameters)
<< T
<< getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
Invalid = true;
continue;
}
}
// If there were at least as many template-ids as there were template
// parameter lists, then there are no template parameter lists remaining for
// the declaration itself.
if (ParamIdx >= ParamLists.size()) {
if (TemplateId && !IsFriend) {
// We don't have a template header for the declaration itself, but we
// should.
DiagnoseMissingExplicitSpecialization(SourceRange(TemplateId->LAngleLoc,
TemplateId->RAngleLoc));
// Fabricate an empty template parameter list for the invented header.
return TemplateParameterList::Create(Context, SourceLocation(),
SourceLocation(), None,
SourceLocation(), nullptr);
}
return nullptr;
}
// If there were too many template parameter lists, complain about that now.
if (ParamIdx < ParamLists.size() - 1) {
bool HasAnyExplicitSpecHeader = false;
bool AllExplicitSpecHeaders = true;
for (unsigned I = ParamIdx, E = ParamLists.size() - 1; I != E; ++I) {
if (ParamLists[I]->size() == 0)
HasAnyExplicitSpecHeader = true;
else
AllExplicitSpecHeaders = false;
}
Diag(ParamLists[ParamIdx]->getTemplateLoc(),
AllExplicitSpecHeaders ? diag::warn_template_spec_extra_headers
: diag::err_template_spec_extra_headers)
<< SourceRange(ParamLists[ParamIdx]->getTemplateLoc(),
ParamLists[ParamLists.size() - 2]->getRAngleLoc());
// If there was a specialization somewhere, such that 'template<>' is
// not required, and there were any 'template<>' headers, note where the
// specialization occurred.
if (ExplicitSpecLoc.isValid() && HasAnyExplicitSpecHeader)
Diag(ExplicitSpecLoc,
diag::note_explicit_template_spec_does_not_need_header)
<< NestedTypes.back();
// We have a template parameter list with no corresponding scope, which
// means that the resulting template declaration can't be instantiated
// properly (we'll end up with dependent nodes when we shouldn't).
if (!AllExplicitSpecHeaders)
Invalid = true;
}
// C++ [temp.expl.spec]p16:
// In an explicit specialization declaration for a member of a class
// template or a member template that appears in namespace scope, the
// member template and some of its enclosing class templates may remain
// unspecialized, except that the declaration shall not explicitly
// specialize a class member template if its enclosing class templates
// are not explicitly specialized as well.
if (ParamLists.back()->size() == 0 &&
CheckExplicitSpecialization(ParamLists[ParamIdx]->getSourceRange(),
false))
return nullptr;
// Return the last template parameter list, which corresponds to the
// entity being declared.
return ParamLists.back();
}
void Sema::NoteAllFoundTemplates(TemplateName Name) {
if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
Diag(Template->getLocation(), diag::note_template_declared_here)
<< (isa<FunctionTemplateDecl>(Template)
? 0
: isa<ClassTemplateDecl>(Template)
? 1
: isa<VarTemplateDecl>(Template)
? 2
: isa<TypeAliasTemplateDecl>(Template) ? 3 : 4)
<< Template->getDeclName();
return;
}
if (OverloadedTemplateStorage *OST = Name.getAsOverloadedTemplate()) {
for (OverloadedTemplateStorage::iterator I = OST->begin(),
IEnd = OST->end();
I != IEnd; ++I)
Diag((*I)->getLocation(), diag::note_template_declared_here)
<< 0 << (*I)->getDeclName();
return;
}
}
static QualType
checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
const SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
ASTContext &Context = SemaRef.getASTContext();
switch (BTD->getBuiltinTemplateKind()) {
case BTK__make_integer_seq: {
// Specializations of __make_integer_seq<S, T, N> are treated like
// S<T, 0, ..., N-1>.
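// For example, __make_integer_seq<std::integer_sequence, int, 3> is
// equivalent to std::integer_sequence<int, 0, 1, 2>.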
// C++14 [intseq.intseq]p1:
// T shall be an integer type.
if (!Converted[1].getAsType()->isIntegralType(Context)) {
SemaRef.Diag(TemplateArgs[1].getLocation(),
diag::err_integer_sequence_integral_element_type);
return QualType();
}
// C++14 [intseq.make]p1:
// If N is negative the program is ill-formed.
TemplateArgument NumArgsArg = Converted[2];
llvm::APSInt NumArgs = NumArgsArg.getAsIntegral();
if (NumArgs < 0) {
SemaRef.Diag(TemplateArgs[2].getLocation(),
diag::err_integer_sequence_negative_length);
return QualType();
}
QualType ArgTy = NumArgsArg.getIntegralType();
TemplateArgumentListInfo SyntheticTemplateArgs;
// The type argument gets reused as the first template argument in the
// synthetic template argument list.
SyntheticTemplateArgs.addArgument(TemplateArgs[1]);
// Expand N into 0 ... N-1.
for (llvm::APSInt I(NumArgs.getBitWidth(), NumArgs.isUnsigned());
I < NumArgs; ++I) {
TemplateArgument TA(Context, I, ArgTy);
SyntheticTemplateArgs.addArgument(SemaRef.getTrivialTemplateArgumentLoc(
TA, ArgTy, TemplateArgs[2].getLocation()));
}
// The first template argument will be reused as the template decl that
// our synthetic template arguments will be applied to.
return SemaRef.CheckTemplateIdType(Converted[0].getAsTemplate(),
TemplateLoc, SyntheticTemplateArgs);
}
case BTK__type_pack_element:
// Specializations of
// __type_pack_element<Index, T_1, ..., T_N>
// are treated like T_Index.
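// For example, __type_pack_element<1, char, short, int> is 'short'.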
assert(Converted.size() == 2 &&
"__type_pack_element should be given an index and a parameter pack");
// If the Index is out of bounds, the program is ill-formed.
TemplateArgument IndexArg = Converted[0], Ts = Converted[1];
llvm::APSInt Index = IndexArg.getAsIntegral();
assert(Index >= 0 && "the index used with __type_pack_element should be of "
"type std::size_t, and hence be non-negative");
if (Index >= Ts.pack_size()) {
SemaRef.Diag(TemplateArgs[0].getLocation(),
diag::err_type_pack_element_out_of_bounds);
return QualType();
}
// We simply return the type at index `Index`.
auto Nth = std::next(Ts.pack_begin(), Index.getExtValue());
return Nth->getAsType();
}
llvm_unreachable("unexpected BuiltinTemplateDecl!");
}
/// Determine whether this alias template is "enable_if_t".
static bool isEnableIfAliasTemplate(TypeAliasTemplateDecl *AliasTemplate) {
return AliasTemplate->getName().equals("enable_if_t");
}
/// Collect all of the separable terms in the given condition, which
/// might be a conjunction.
///
/// FIXME: The right answer is to convert the logical expression into
/// disjunctive normal form, so we can find the first failed term
/// within each possible clause.
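///
/// For example, given the condition 'A && (B && C)', the collected terms
/// are A, B, and C.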
static void collectConjunctionTerms(Expr *Clause,
SmallVectorImpl<Expr *> &Terms) {
if (auto BinOp = dyn_cast<BinaryOperator>(Clause->IgnoreParenImpCasts())) {
if (BinOp->getOpcode() == BO_LAnd) {
collectConjunctionTerms(BinOp->getLHS(), Terms);
collectConjunctionTerms(BinOp->getRHS(), Terms);
}
return;
}
Terms.push_back(Clause);
}
// The ranges-v3 library uses an odd pattern of a top-level "||" with
// a left-hand side that is value-dependent but never true. Identify
// the idiom and ignore that term.
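// The enable_if condition produced by those macros has (roughly) the form
//   (SomeValueDependentConstant == <integer-literal>) || (UserCondition)
// so the term that actually carries the user's constraint is the
// right-hand side of the top-level '||'.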
static Expr *lookThroughRangesV3Condition(Preprocessor &PP, Expr *Cond) {
// Top-level '||'.
auto *BinOp = dyn_cast<BinaryOperator>(Cond->IgnoreParenImpCasts());
if (!BinOp) return Cond;
if (BinOp->getOpcode() != BO_LOr) return Cond;
// With an inner '==' that has a literal on the right-hand side.
Expr *LHS = BinOp->getLHS();
auto *InnerBinOp = dyn_cast<BinaryOperator>(LHS->IgnoreParenImpCasts());
if (!InnerBinOp) return Cond;
if (InnerBinOp->getOpcode() != BO_EQ ||
!isa<IntegerLiteral>(InnerBinOp->getRHS()))
return Cond;
// If the inner binary operation came from a macro expansion named
// CONCEPT_REQUIRES or CONCEPT_REQUIRES_, return the right-hand side
// of the '||', which is the real, user-provided condition.
SourceLocation Loc = InnerBinOp->getExprLoc();
if (!Loc.isMacroID()) return Cond;
StringRef MacroName = PP.getImmediateMacroName(Loc);
if (MacroName == "CONCEPT_REQUIRES" || MacroName == "CONCEPT_REQUIRES_")
return BinOp->getRHS();
return Cond;
}
std::pair<Expr *, std::string>
Sema::findFailedBooleanCondition(Expr *Cond, bool AllowTopLevelCond) {
Cond = lookThroughRangesV3Condition(PP, Cond);
// Separate out all of the terms in a conjunction.
SmallVector<Expr *, 4> Terms;
collectConjunctionTerms(Cond, Terms);
// Determine which term failed.
Expr *FailedCond = nullptr;
for (Expr *Term : Terms) {
Expr *TermAsWritten = Term->IgnoreParenImpCasts();
// Literals are uninteresting.
if (isa<CXXBoolLiteralExpr>(TermAsWritten) ||
isa<IntegerLiteral>(TermAsWritten))
continue;
// The initialization of the parameter from the argument is
// a constant-evaluated context.
EnterExpressionEvaluationContext ConstantEvaluated(
*this, Sema::ExpressionEvaluationContext::ConstantEvaluated);
bool Succeeded;
if (Term->EvaluateAsBooleanCondition(Succeeded, Context) &&
!Succeeded) {
FailedCond = TermAsWritten;
break;
}
}
if (!FailedCond) {
if (!AllowTopLevelCond)
return { nullptr, "" };
FailedCond = Cond->IgnoreParenImpCasts();
}
std::string Description;
{
llvm::raw_string_ostream Out(Description);
FailedCond->printPretty(Out, nullptr, getPrintingPolicy());
}
return { FailedCond, Description };
}
QualType Sema::CheckTemplateIdType(TemplateName Name,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
DependentTemplateName *DTN
= Name.getUnderlying().getAsDependentTemplateName();
if (DTN && DTN->isIdentifier())
// When building a template-id where the template-name is dependent,
// assume the template is a type template. Either our assumption is
// correct, or the code is ill-formed and will be diagnosed when the
// dependent name is substituted.
return Context.getDependentTemplateSpecializationType(ETK_None,
DTN->getQualifier(),
DTN->getIdentifier(),
TemplateArgs);
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template || isa<FunctionTemplateDecl>(Template) ||
isa<VarTemplateDecl>(Template)) {
// We might have a substituted template template parameter pack. If so,
// build a template specialization type for it.
if (Name.getAsSubstTemplateTemplateParmPack())
return Context.getTemplateSpecializationType(Name, TemplateArgs);
Diag(TemplateLoc, diag::err_template_id_not_a_type)
<< Name;
NoteAllFoundTemplates(Name);
return QualType();
}
// Check that the template argument list is well-formed for this
// template.
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
false, Converted))
return QualType();
QualType CanonType;
bool InstantiationDependent = false;
if (TypeAliasTemplateDecl *AliasTemplate =
dyn_cast<TypeAliasTemplateDecl>(Template)) {
// Find the canonical type for this type alias template specialization.
TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl();
if (Pattern->isInvalidDecl())
return QualType();
TemplateArgumentList StackTemplateArgs(TemplateArgumentList::OnStack,
Converted);
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists;
TemplateArgLists.addOuterTemplateArguments(&StackTemplateArgs);
unsigned Depth = AliasTemplate->getTemplateParameters()->getDepth();
for (unsigned I = 0; I < Depth; ++I)
TemplateArgLists.addOuterTemplateArguments(None);
LocalInstantiationScope Scope(*this);
InstantiatingTemplate Inst(*this, TemplateLoc, Template);
if (Inst.isInvalid())
return QualType();
CanonType = SubstType(Pattern->getUnderlyingType(),
TemplateArgLists, AliasTemplate->getLocation(),
AliasTemplate->getDeclName());
if (CanonType.isNull()) {
// If this was enable_if and we failed to find the nested type
// within enable_if in a SFINAE context, dig out the specific
// enable_if condition that failed and present that instead.
if (isEnableIfAliasTemplate(AliasTemplate)) {
if (auto DeductionInfo = isSFINAEContext()) {
if (*DeductionInfo &&
(*DeductionInfo)->hasSFINAEDiagnostic() &&
(*DeductionInfo)->peekSFINAEDiagnostic().second.getDiagID() ==
diag::err_typename_nested_not_found_enable_if &&
TemplateArgs[0].getArgument().getKind()
== TemplateArgument::Expression) {
Expr *FailedCond;
std::string FailedDescription;
std::tie(FailedCond, FailedDescription) =
findFailedBooleanCondition(
TemplateArgs[0].getSourceExpression(),
/*AllowTopLevelCond=*/true);
// Remove the old SFINAE diagnostic.
PartialDiagnosticAt OldDiag =
{SourceLocation(), PartialDiagnostic::NullDiagnostic()};
(*DeductionInfo)->takeSFINAEDiagnostic(OldDiag);
// Add a new SFINAE diagnostic specifying which condition
// failed.
(*DeductionInfo)->addSFINAEDiagnostic(
OldDiag.first,
PDiag(diag::err_typename_nested_not_found_requirement)
<< FailedDescription
<< FailedCond->getSourceRange());
}
}
}
return QualType();
}
} else if (Name.isDependent() ||
TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs, InstantiationDependent)) {
// This class template specialization is a dependent
// type. Therefore, its canonical type is another class template
// specialization type that contains all of the converted
// arguments in canonical form. This ensures that, e.g., A<T> and
// A<T, T> have identical types when A is declared as:
//
// template<typename T, typename U = T> struct A;
CanonType = Context.getCanonicalTemplateSpecializationType(Name, Converted);
// This might work out to be a current instantiation, in which
// case the canonical type needs to be the InjectedClassNameType.
//
// TODO: in theory this could be a simple hashtable lookup; most
// changes to CurContext don't change the set of current
// instantiations.
if (isa<ClassTemplateDecl>(Template)) {
for (DeclContext *Ctx = CurContext; Ctx; Ctx = Ctx->getLookupParent()) {
// If we get out to a namespace, we're done.
if (Ctx->isFileContext()) break;
// If this isn't a record, keep looking.
CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx);
if (!Record) continue;
// Look for one of the two cases with InjectedClassNameTypes
// and check whether it's the same template.
if (!isa<ClassTemplatePartialSpecializationDecl>(Record) &&
!Record->getDescribedClassTemplate())
continue;
// Fetch the injected class name type and check whether its
// injected type is equal to the type we just built.
QualType ICNT = Context.getTypeDeclType(Record);
QualType Injected = cast<InjectedClassNameType>(ICNT)
->getInjectedSpecializationType();
if (CanonType != Injected->getCanonicalTypeInternal())
continue;
// If so, the canonical type of this TST is the injected
// class name type of the record we just found.
assert(ICNT.isCanonical());
CanonType = ICNT;
break;
}
}
} else if (ClassTemplateDecl *ClassTemplate
= dyn_cast<ClassTemplateDecl>(Template)) {
// Find the class template specialization declaration that
// corresponds to these arguments.
void *InsertPos = nullptr;
ClassTemplateSpecializationDecl *Decl
= ClassTemplate->findSpecialization(Converted, InsertPos);
if (!Decl) {
// This is the first time we have referenced this class template
// specialization. Create the canonical declaration and add it to
// the set of specializations.
Decl = ClassTemplateSpecializationDecl::Create(Context,
ClassTemplate->getTemplatedDecl()->getTagKind(),
ClassTemplate->getDeclContext(),
ClassTemplate->getTemplatedDecl()->getLocStart(),
ClassTemplate->getLocation(),
ClassTemplate,
Converted, nullptr);
ClassTemplate->AddSpecialization(Decl, InsertPos);
if (ClassTemplate->isOutOfLine())
Decl->setLexicalDeclContext(ClassTemplate->getLexicalDeclContext());
}
if (Decl->getSpecializationKind() == TSK_Undeclared) {
MultiLevelTemplateArgumentList TemplateArgLists;
TemplateArgLists.addOuterTemplateArguments(Converted);
InstantiateAttrsForDecl(TemplateArgLists, ClassTemplate->getTemplatedDecl(),
Decl);
}
// Diagnose uses of this specialization.
(void)DiagnoseUseOfDecl(Decl, TemplateLoc);
CanonType = Context.getTypeDeclType(Decl);
assert(isa<RecordType>(CanonType) &&
"type of non-dependent specialization is not a RecordType");
} else if (auto *BTD = dyn_cast<BuiltinTemplateDecl>(Template)) {
CanonType = checkBuiltinTemplateIdType(*this, BTD, Converted, TemplateLoc,
TemplateArgs);
}
// Build the fully-sugared type for this class template
// specialization, which refers back to the class template
// specialization we created or found.
return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType);
}
TypeResult
Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy TemplateD, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc,
bool IsCtorOrDtorName, bool IsClassName) {
if (SS.isInvalid())
return true;
if (!IsCtorOrDtorName && !IsClassName && SS.isSet()) {
DeclContext *LookupCtx = computeDeclContext(SS, /*EnteringContext*/false);
// C++ [temp.res]p3:
// A qualified-id that refers to a type and in which the
// nested-name-specifier depends on a template-parameter (14.6.2)
// shall be prefixed by the keyword typename to indicate that the
// qualified-id denotes a type, forming an
// elaborated-type-specifier (7.1.5.3).
if (!LookupCtx && isDependentScopeSpecifier(SS)) {
Diag(SS.getBeginLoc(), diag::err_typename_missing_template)
<< SS.getScopeRep() << TemplateII->getName();
// Recover as if 'typename' were specified.
// FIXME: This is not quite correct recovery as we don't transform SS
// into the corresponding dependent form (and we don't diagnose missing
// 'template' keywords within SS as a result).
return ActOnTypenameType(nullptr, SourceLocation(), SS, TemplateKWLoc,
TemplateD, TemplateII, TemplateIILoc, LAngleLoc,
TemplateArgsIn, RAngleLoc);
}
// Per C++ [class.qual]p2, if the template-id was an injected-class-name,
// it's not actually allowed to be used as a type in most cases. Because
// we annotate it before we know whether it's valid, we have to check for
// this case here.
auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(LookupCtx);
if (LookupRD && LookupRD->getIdentifier() == TemplateII) {
Diag(TemplateIILoc,
TemplateKWLoc.isInvalid()
? diag::err_out_of_line_qualified_id_type_names_constructor
: diag::ext_out_of_line_qualified_id_type_names_constructor)
<< TemplateII << 0 /*injected-class-name used as template name*/
<< 1 /*if any keyword was present, it was 'template'*/;
}
}
TemplateName Template = TemplateD.get();
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
QualType T
= Context.getDependentTemplateSpecializationType(ETK_None,
DTN->getQualifier(),
DTN->getIdentifier(),
TemplateArgs);
// Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(T);
SpecTL.setElaboratedKeywordLoc(SourceLocation());
SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateIILoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
QualType Result = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
if (Result.isNull())
return true;
// Build type-source information.
TypeLocBuilder TLB;
TemplateSpecializationTypeLoc SpecTL
= TLB.push<TemplateSpecializationTypeLoc>(Result);
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateIILoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
// NOTE: avoid constructing an ElaboratedTypeLoc if this is a
// constructor or destructor name (in such a case, the scope specifier
// will be attached to the enclosing Decl or Expr node).
if (SS.isNotEmpty() && !IsCtorOrDtorName) {
// Create an elaborated-type-specifier containing the nested-name-specifier.
Result = Context.getElaboratedType(ETK_None, SS.getScopeRep(), Result);
ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
}
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) {
TemplateName Template = TemplateD.get();
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
// Determine the tag kind
TagTypeKind TagKind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
ElaboratedTypeKeyword Keyword
= TypeWithKeyword::getKeywordForTagTypeKind(TagKind);
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
QualType T = Context.getDependentTemplateSpecializationType(Keyword,
DTN->getQualifier(),
DTN->getIdentifier(),
TemplateArgs);
// Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(T);
SpecTL.setElaboratedKeywordLoc(TagLoc);
SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
if (TypeAliasTemplateDecl *TAT =
dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) {
// C++0x [dcl.type.elab]p2:
// If the identifier resolves to a typedef-name or the simple-template-id
// resolves to an alias template specialization, the
// elaborated-type-specifier is ill-formed.
Diag(TemplateLoc, diag::err_tag_reference_non_tag)
<< TAT << NTK_TypeAliasTemplate << TagKind;
Diag(TAT->getLocation(), diag::note_declared_at);
}
QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
if (Result.isNull())
return TypeResult(true);
// Check the tag kind
if (const RecordType *RT = Result->getAs<RecordType>()) {
RecordDecl *D = RT->getDecl();
IdentifierInfo *Id = D->getIdentifier();
assert(Id && "templated class must have an identifier");
if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition,
TagLoc, Id)) {
Diag(TagLoc, diag::err_use_with_wrong_tag)
<< Result
<< FixItHint::CreateReplacement(SourceRange(TagLoc), D->getKindName());
Diag(D->getLocation(), diag::note_previous_use);
}
}
// Provide source-location information for the template specialization.
TypeLocBuilder TLB;
TemplateSpecializationTypeLoc SpecTL
= TLB.push<TemplateSpecializationTypeLoc>(Result);
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateLoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
// Construct an elaborated type containing the nested-name-specifier (if any)
// and tag keyword.
Result = Context.getElaboratedType(Keyword, SS.getScopeRep(), Result);
ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
ElabTL.setElaboratedKeywordLoc(TagLoc);
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
static bool CheckTemplateSpecializationScope(Sema &S, NamedDecl *Specialized,
NamedDecl *PrevDecl,
SourceLocation Loc,
bool IsPartialSpecialization);
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D);
static bool isTemplateArgumentTemplateParameter(
const TemplateArgument &Arg, unsigned Depth, unsigned Index) {
switch (Arg.getKind()) {
case TemplateArgument::Null:
case TemplateArgument::NullPtr:
case TemplateArgument::Integral:
case TemplateArgument::Declaration:
case TemplateArgument::Pack:
case TemplateArgument::TemplateExpansion:
return false;
case TemplateArgument::Type: {
QualType Type = Arg.getAsType();
const TemplateTypeParmType *TPT =
Arg.getAsType()->getAs<TemplateTypeParmType>();
return TPT && !Type.hasQualifiers() &&
TPT->getDepth() == Depth && TPT->getIndex() == Index;
}
case TemplateArgument::Expression: {
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg.getAsExpr());
if (!DRE || !DRE->getDecl())
return false;
const NonTypeTemplateParmDecl *NTTP =
dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
return NTTP && NTTP->getDepth() == Depth && NTTP->getIndex() == Index;
}
case TemplateArgument::Template:
const TemplateTemplateParmDecl *TTP =
dyn_cast_or_null<TemplateTemplateParmDecl>(
Arg.getAsTemplateOrTemplatePattern().getAsTemplateDecl());
return TTP && TTP->getDepth() == Depth && TTP->getIndex() == Index;
}
llvm_unreachable("unexpected kind of template argument");
}
static bool isSameAsPrimaryTemplate(TemplateParameterList *Params,
ArrayRef<TemplateArgument> Args) {
if (Params->size() != Args.size())
return false;
unsigned Depth = Params->getDepth();
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
TemplateArgument Arg = Args[I];
// If the parameter is a pack expansion, the argument must be a pack
// whose only element is a pack expansion.
if (Params->getParam(I)->isParameterPack()) {
if (Arg.getKind() != TemplateArgument::Pack || Arg.pack_size() != 1 ||
!Arg.pack_begin()->isPackExpansion())
return false;
Arg = Arg.pack_begin()->getPackExpansionPattern();
}
if (!isTemplateArgumentTemplateParameter(Arg, Depth, I))
return false;
}
return true;
}
/// Convert the parser's template argument list representation into our form.
static TemplateArgumentListInfo
makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) {
TemplateArgumentListInfo TemplateArgs(TemplateId.LAngleLoc,
TemplateId.RAngleLoc);
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId.getTemplateArgs(),
TemplateId.NumArgs);
S.translateTemplateArguments(TemplateArgsPtr, TemplateArgs);
return TemplateArgs;
}
template<typename PartialSpecDecl>
static void checkMoreSpecializedThanPrimary(Sema &S, PartialSpecDecl *Partial) {
if (Partial->getDeclContext()->isDependentContext())
return;
// FIXME: Get the TDK from deduction in order to provide better diagnostics
// for non-substitution-failure issues?
TemplateDeductionInfo Info(Partial->getLocation());
if (S.isMoreSpecializedThanPrimary(Partial, Info))
return;
auto *Template = Partial->getSpecializedTemplate();
S.Diag(Partial->getLocation(),
diag::ext_partial_spec_not_more_specialized_than_primary)
<< isa<VarTemplateDecl>(Template);
if (Info.hasSFINAEDiagnostic()) {
PartialDiagnosticAt Diag = {SourceLocation(),
PartialDiagnostic::NullDiagnostic()};
Info.takeSFINAEDiagnostic(Diag);
SmallString<128> SFINAEArgString;
Diag.second.EmitToString(S.getDiagnostics(), SFINAEArgString);
S.Diag(Diag.first,
diag::note_partial_spec_not_more_specialized_than_primary)
<< SFINAEArgString;
}
S.Diag(Template->getLocation(), diag::note_template_decl_here);
}
static void
noteNonDeducibleParameters(Sema &S, TemplateParameterList *TemplateParams,
const llvm::SmallBitVector &DeducibleParams) {
for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
if (!DeducibleParams[I]) {
NamedDecl *Param = TemplateParams->getParam(I);
if (Param->getDeclName())
S.Diag(Param->getLocation(), diag::note_non_deducible_parameter)
<< Param->getDeclName();
else
S.Diag(Param->getLocation(), diag::note_non_deducible_parameter)
<< "(anonymous)";
}
}
}
template<typename PartialSpecDecl>
static void checkTemplatePartialSpecialization(Sema &S,
PartialSpecDecl *Partial) {
// C++1z [temp.class.spec]p8: (DR1495)
// - The specialization shall be more specialized than the primary
// template (14.5.5.2).
checkMoreSpecializedThanPrimary(S, Partial);
// C++ [temp.class.spec]p8: (DR1315)
// - Each template-parameter shall appear at least once in the
// template-id outside a non-deduced context.
// C++1z [temp.class.spec.match]p3 (P0127R2)
// If the template arguments of a partial specialization cannot be
// deduced because of the structure of its template-parameter-list
// and the template-id, the program is ill-formed.
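// For example, in
//   template<typename T> struct X;
//   template<typename T> struct X<typename T::type> {};
// the parameter T of the partial specialization appears only in a
// non-deduced context, so its argument can never be deduced.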
auto *TemplateParams = Partial->getTemplateParameters();
llvm::SmallBitVector DeducibleParams(TemplateParams->size());
S.MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
TemplateParams->getDepth(), DeducibleParams);
if (!DeducibleParams.all()) {
unsigned NumNonDeducible = DeducibleParams.size() - DeducibleParams.count();
S.Diag(Partial->getLocation(), diag::ext_partial_specs_not_deducible)
<< isa<VarTemplatePartialSpecializationDecl>(Partial)
<< (NumNonDeducible > 1)
<< SourceRange(Partial->getLocation(),
Partial->getTemplateArgsAsWritten()->RAngleLoc);
noteNonDeducibleParameters(S, TemplateParams, DeducibleParams);
}
}
void Sema::CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial) {
checkTemplatePartialSpecialization(*this, Partial);
}
void Sema::CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial) {
checkTemplatePartialSpecialization(*this, Partial);
}
void Sema::CheckDeductionGuideTemplate(FunctionTemplateDecl *TD) {
// C++1z [temp.param]p11:
// A template parameter of a deduction guide template that does not have a
// default-argument shall be deducible from the parameter-type-list of the
// deduction guide template.
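// For example, in
//   template<typename T> struct S { S(int); };
//   template<typename U> S(int) -> S<U>;
// the parameter U is neither deducible from the parameter-type-list (int)
// nor given a default argument, so the guide is ill-formed.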
auto *TemplateParams = TD->getTemplateParameters();
llvm::SmallBitVector DeducibleParams(TemplateParams->size());
MarkDeducedTemplateParameters(TD, DeducibleParams);
for (unsigned I = 0; I != TemplateParams->size(); ++I) {
// A parameter pack is deducible (to an empty pack).
auto *Param = TemplateParams->getParam(I);
if (Param->isParameterPack() || hasVisibleDefaultArgument(Param))
DeducibleParams[I] = true;
}
if (!DeducibleParams.all()) {
unsigned NumNonDeducible = DeducibleParams.size() - DeducibleParams.count();
Diag(TD->getLocation(), diag::err_deduction_guide_template_not_deducible)
<< (NumNonDeducible > 1);
noteNonDeducibleParameters(*this, TemplateParams, DeducibleParams);
}
}
DeclResult Sema::ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc,
TemplateParameterList *TemplateParams, StorageClass SC,
bool IsPartialSpecialization) {
// D must be a variable template id.
assert(D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId &&
"Variable template specialization is declared with a template id.");
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgumentListInfo TemplateArgs =
makeTemplateArgumentListInfo(*this, *TemplateId);
SourceLocation TemplateNameLoc = D.getIdentifierLoc();
SourceLocation LAngleLoc = TemplateId->LAngleLoc;
SourceLocation RAngleLoc = TemplateId->RAngleLoc;
TemplateName Name = TemplateId->Template.get();
// The template-id must name a variable template.
VarTemplateDecl *VarTemplate =
dyn_cast_or_null<VarTemplateDecl>(Name.getAsTemplateDecl());
if (!VarTemplate) {
NamedDecl *FnTemplate;
if (auto *OTS = Name.getAsOverloadedTemplate())
FnTemplate = *OTS->begin();
else
FnTemplate = dyn_cast_or_null<FunctionTemplateDecl>(Name.getAsTemplateDecl());
if (FnTemplate)
return Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template_but_method)
<< FnTemplate->getDeclName();
return Diag(D.getIdentifierLoc(), diag::err_var_spec_no_template)
<< IsPartialSpecialization;
}
// Check for unexpanded parameter packs in any of the template arguments.
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
UPPC_PartialSpecialization))
return true;
// Check that the template argument list is well-formed for this
// template.
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(VarTemplate, TemplateNameLoc, TemplateArgs,
false, Converted))
return true;
// Find the variable template (partial) specialization declaration that
// corresponds to these arguments.
if (IsPartialSpecialization) {
if (CheckTemplatePartialSpecializationArgs(TemplateNameLoc, VarTemplate,
TemplateArgs.size(), Converted))
return true;
// FIXME: Move these checks to CheckTemplatePartialSpecializationArgs so we
// also do them during instantiation.
bool InstantiationDependent;
if (!Name.isDependent() &&
!TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs.arguments(),
InstantiationDependent)) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< VarTemplate->getDeclName();
IsPartialSpecialization = false;
}
if (isSameAsPrimaryTemplate(VarTemplate->getTemplateParameters(),
Converted)) {
// C++ [temp.class.spec]p9b3:
//
// -- The argument list of the specialization shall not be identical
// to the implicit argument list of the primary template.
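//
// For example:
//   template<typename T> T var;
//   template<typename T> T var<T>;  // ill-formed: identical to the primary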
Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
<< /*variable template*/ 1
<< /*is definition*/(SC != SC_Extern && !CurContext->isRecord())
<< FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
// FIXME: Recover from this by treating the declaration as a redeclaration
// of the primary template.
return true;
}
}
void *InsertPos = nullptr;
VarTemplateSpecializationDecl *PrevDecl = nullptr;
if (IsPartialSpecialization)
// FIXME: Template parameter list matters too
PrevDecl = VarTemplate->findPartialSpecialization(Converted, InsertPos);
else
PrevDecl = VarTemplate->findSpecialization(Converted, InsertPos);
VarTemplateSpecializationDecl *Specialization = nullptr;
// Check whether we can declare a variable template specialization in
// the current scope.
if (CheckTemplateSpecializationScope(*this, VarTemplate, PrevDecl,
TemplateNameLoc,
IsPartialSpecialization))
return true;
if (PrevDecl && PrevDecl->getSpecializationKind() == TSK_Undeclared) {
// Since the only prior variable template specialization with these
// arguments was referenced but not declared, reuse that
// declaration node as our own, updating its source location and
// the list of outer template parameters to reflect our new declaration.
Specialization = PrevDecl;
Specialization->setLocation(TemplateNameLoc);
PrevDecl = nullptr;
} else if (IsPartialSpecialization) {
// Create a new variable template partial specialization declaration node.
VarTemplatePartialSpecializationDecl *PrevPartial =
cast_or_null<VarTemplatePartialSpecializationDecl>(PrevDecl);
VarTemplatePartialSpecializationDecl *Partial =
VarTemplatePartialSpecializationDecl::Create(
Context, VarTemplate->getDeclContext(), TemplateKWLoc,
TemplateNameLoc, TemplateParams, VarTemplate, DI->getType(), DI, SC,
Converted, TemplateArgs);
if (!PrevPartial)
VarTemplate->AddPartialSpecialization(Partial, InsertPos);
Specialization = Partial;
// If we are providing an explicit specialization of a member variable
// template specialization, make a note of that.
if (PrevPartial && PrevPartial->getInstantiatedFromMember())
PrevPartial->setMemberSpecialization();
CheckTemplatePartialSpecialization(Partial);
} else {
// Create a new variable template specialization declaration node for
// this explicit specialization or friend declaration.
Specialization = VarTemplateSpecializationDecl::Create(
Context, VarTemplate->getDeclContext(), TemplateKWLoc, TemplateNameLoc,
VarTemplate, DI->getType(), DI, SC, Converted);
Specialization->setTemplateArgsInfo(TemplateArgs);
if (!PrevDecl)
VarTemplate->AddSpecialization(Specialization, InsertPos);
}
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
// explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
// instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
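// For example:
//   template<typename T> T var = T();
//   int i = var<int>;              // implicit instantiation of var<int> ...
//   template<> int var<int> = 42;  // ... so this specialization is too late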
if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
bool Okay = false;
for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
// Is there any previous explicit specialization declaration?
if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
Okay = true;
break;
}
}
if (!Okay) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
<< Name << Range;
Diag(PrevDecl->getPointOfInstantiation(),
diag::note_instantiation_required_here)
<< (PrevDecl->getTemplateSpecializationKind() !=
TSK_ImplicitInstantiation);
return true;
}
}
Specialization->setTemplateKeywordLoc(TemplateKWLoc);
Specialization->setLexicalDeclContext(CurContext);
// Add the specialization into its lexical context, so that it can
// be seen when iterating through the list of declarations in that
// context. However, specializations are not found by name lookup.
CurContext->addDecl(Specialization);
// Note that this is an explicit specialization.
Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
if (PrevDecl) {
// Check that this isn't a redefinition of this specialization,
// merging with previous declarations.
LookupResult PrevSpec(*this, GetNameForDeclarator(D), LookupOrdinaryName,
forRedeclarationInCurContext());
PrevSpec.addDecl(PrevDecl);
D.setRedeclaration(CheckVariableDeclaration(Specialization, PrevSpec));
} else if (Specialization->isStaticDataMember() &&
Specialization->isOutOfLine()) {
Specialization->setAccess(VarTemplate->getAccess());
}
// Link instantiations of static data members back to the template from
// which they were instantiated.
if (Specialization->isStaticDataMember())
Specialization->setInstantiationOfStaticDataMember(
VarTemplate->getTemplatedDecl(),
Specialization->getSpecializationKind());
return Specialization;
}
namespace {
/// A partial specialization whose template arguments have matched
/// a given template-id.
struct PartialSpecMatchResult {
VarTemplatePartialSpecializationDecl *Partial;
TemplateArgumentList *Args;
};
} // end anonymous namespace
DeclResult
Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs) {
assert(Template && "A variable template id without template?");
// Check that the template argument list is well-formed for this template.
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(
Template, TemplateNameLoc,
const_cast<TemplateArgumentListInfo &>(TemplateArgs), false,
Converted))
return true;
// Find the variable template specialization declaration that
// corresponds to these arguments.
void *InsertPos = nullptr;
if (VarTemplateSpecializationDecl *Spec = Template->findSpecialization(
Converted, InsertPos)) {
checkSpecializationVisibility(TemplateNameLoc, Spec);
// If we already have a variable template specialization, return it.
return Spec;
}
// This is the first time we have referenced this variable template
// specialization. Create the canonical declaration and add it to
// the set of specializations, based on the closest partial specialization
// that it represents; that is, the best matching partial specialization
// if one exists, or the primary template otherwise.
VarDecl *InstantiationPattern = Template->getTemplatedDecl();
TemplateArgumentList TemplateArgList(TemplateArgumentList::OnStack,
Converted);
TemplateArgumentList *InstantiationArgs = &TemplateArgList;
bool AmbiguousPartialSpec = false;
typedef PartialSpecMatchResult MatchResult;
SmallVector<MatchResult, 4> Matched;
SourceLocation PointOfInstantiation = TemplateNameLoc;
TemplateSpecCandidateSet FailedCandidates(PointOfInstantiation,
/*ForTakingAddress=*/false);
// 1. Attempt to find the closest partial specialization that this
// specializes, if any.
// If any of the template arguments is dependent, then this is probably
// a placeholder for an incomplete declarative context, which must be
// complete by instantiation time. Thus, do not search through the partial
// specializations yet.
// TODO: Unify with InstantiateClassTemplateSpecialization()?
// Perhaps better after unification of DeduceTemplateArguments() and
// getMoreSpecializedPartialSpecialization().
bool InstantiationDependent = false;
if (!TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs, InstantiationDependent)) {
SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
Template->getPartialSpecializations(PartialSpecs);
for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
VarTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
TemplateDeductionInfo Info(FailedCandidates.getLocation());
if (TemplateDeductionResult Result =
DeduceTemplateArguments(Partial, TemplateArgList, Info)) {
// Store the failed-deduction information for use in diagnostics, later.
// TODO: Actually use the failed-deduction info?
FailedCandidates.addCandidate().set(
DeclAccessPair::make(Template, AS_public), Partial,
MakeDeductionFailureInfo(Context, Result, Info));
(void)Result;
} else {
Matched.push_back(PartialSpecMatchResult());
Matched.back().Partial = Partial;
Matched.back().Args = Info.take();
}
}
if (Matched.size() >= 1) {
SmallVector<MatchResult, 4>::iterator Best = Matched.begin();
if (Matched.size() == 1) {
// -- If exactly one matching specialization is found, the
// instantiation is generated from that specialization.
// We don't need to do anything for this.
} else {
// -- If more than one matching specialization is found, the
// partial order rules (14.5.4.2) are used to determine
// whether one of the specializations is more specialized
// than the others. If none of the specializations is more
// specialized than all of the other matching
// specializations, then the use of the variable template is
// ambiguous and the program is ill-formed.
for (SmallVector<MatchResult, 4>::iterator P = Best + 1,
PEnd = Matched.end();
P != PEnd; ++P) {
if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
PointOfInstantiation) ==
P->Partial)
Best = P;
}
// Determine if the best partial specialization is more specialized than
// the others.
for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
PEnd = Matched.end();
P != PEnd; ++P) {
if (P != Best && getMoreSpecializedPartialSpecialization(
P->Partial, Best->Partial,
PointOfInstantiation) != Best->Partial) {
AmbiguousPartialSpec = true;
break;
}
}
}
// Instantiate using the best variable template partial specialization.
InstantiationPattern = Best->Partial;
InstantiationArgs = Best->Args;
} else {
// -- If no match is found, the instantiation is generated
// from the primary template.
// InstantiationPattern = Template->getTemplatedDecl();
}
}
// 2. Create the canonical declaration.
// Note that we do not instantiate a definition until we see an odr-use
// in DoMarkVarDeclReferenced().
// FIXME: LateAttrs et al.?
VarTemplateSpecializationDecl *Decl = BuildVarTemplateInstantiation(
Template, InstantiationPattern, *InstantiationArgs, TemplateArgs,
Converted, TemplateNameLoc, InsertPos /*, LateAttrs, StartingScope*/);
if (!Decl)
return true;
if (AmbiguousPartialSpec) {
// Partial ordering did not produce a clear winner. Complain.
Decl->setInvalidDecl();
Diag(PointOfInstantiation, diag::err_partial_spec_ordering_ambiguous)
<< Decl;
// Print the matching partial specializations.
for (MatchResult P : Matched)
Diag(P.Partial->getLocation(), diag::note_partial_spec_match)
<< getTemplateArgumentBindingsText(P.Partial->getTemplateParameters(),
*P.Args);
return true;
}
if (VarTemplatePartialSpecializationDecl *D =
dyn_cast<VarTemplatePartialSpecializationDecl>(InstantiationPattern))
Decl->setInstantiationOf(D, InstantiationArgs);
checkSpecializationVisibility(TemplateNameLoc, Decl);
assert(Decl && "No variable template specialization?");
return Decl;
}
ExprResult
Sema::CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template, SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs) {
DeclResult Decl = CheckVarTemplateId(Template, TemplateLoc, NameInfo.getLoc(),
*TemplateArgs);
if (Decl.isInvalid())
return ExprError();
VarDecl *Var = cast<VarDecl>(Decl.get());
if (!Var->getTemplateSpecializationKind())
Var->setTemplateSpecializationKind(TSK_ImplicitInstantiation,
NameInfo.getLoc());
// Build an ordinary singleton decl ref.
return BuildDeclarationNameExpr(SS, NameInfo, Var,
/*FoundD=*/nullptr, TemplateArgs);
}
void Sema::diagnoseMissingTemplateArguments(TemplateName Name,
SourceLocation Loc) {
Diag(Loc, diag::err_template_missing_args)
<< (int)getTemplateNameKindForDiagnostics(Name) << Name;
if (TemplateDecl *TD = Name.getAsTemplateDecl()) {
Diag(TD->getLocation(), diag::note_template_decl_here)
<< TD->getTemplateParameters()->getSourceRange();
}
}
ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs) {
// FIXME: Can we do any checking at this point? I guess we could check the
// template arguments that we have against the template name, if the template
// name refers to a single template. That's not a terribly common case,
// though.
// For example, foo<int> could identify a single function unambiguously.
// However, this does NOT work in general, since a call such as f<int>(1)
// is resolved before we resort to overload resolution, e.g. for
//   template<class T> void f(double);
//   template<class T, class U> void f(U);
// These should be filtered out by our callers.
assert(!R.empty() && "empty lookup results when building templateid");
assert(!R.isAmbiguous() && "ambiguous lookup when building templateid");
// Non-function templates require a template argument list.
if (auto *TD = R.getAsSingle<TemplateDecl>()) {
if (!TemplateArgs && !isa<FunctionTemplateDecl>(TD)) {
diagnoseMissingTemplateArguments(TemplateName(TD), R.getNameLoc());
return ExprError();
}
}
auto AnyDependentArguments = [&]() -> bool {
bool InstantiationDependent;
return TemplateArgs &&
TemplateSpecializationType::anyDependentTemplateArguments(
*TemplateArgs, InstantiationDependent);
};
// In C++1y, check variable template ids.
if (R.getAsSingle<VarTemplateDecl>() && !AnyDependentArguments()) {
return CheckVarTemplateId(SS, R.getLookupNameInfo(),
R.getAsSingle<VarTemplateDecl>(),
TemplateKWLoc, TemplateArgs);
}
// We don't want lookup warnings at this point.
R.suppressDiagnostics();
UnresolvedLookupExpr *ULE
= UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
SS.getWithLocInContext(Context),
TemplateKWLoc,
R.getLookupNameInfo(),
RequiresADL, TemplateArgs,
R.begin(), R.end());
return ULE;
}
// We actually only call this from template instantiation.
ExprResult
Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
assert(TemplateArgs || TemplateKWLoc.isValid());
DeclContext *DC;
if (!(DC = computeDeclContext(SS, false)) ||
DC->isDependentContext() ||
RequireCompleteDeclContext(SS, DC))
return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
bool MemberOfUnknownSpecialization;
LookupResult R(*this, NameInfo, LookupOrdinaryName);
if (LookupTemplateName(R, (Scope *)nullptr, SS, QualType(),
/*Entering*/false, MemberOfUnknownSpecialization,
TemplateKWLoc))
return ExprError();
if (R.isAmbiguous())
return ExprError();
if (R.empty()) {
Diag(NameInfo.getLoc(), diag::err_no_member)
<< NameInfo.getName() << DC << SS.getRange();
return ExprError();
}
if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>()) {
Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_class_template)
<< SS.getScopeRep()
<< NameInfo.getName().getAsString() << SS.getRange();
Diag(Temp->getLocation(), diag::note_referenced_class_template);
return ExprError();
}
return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
}
/// Form a dependent template name.
///
/// This action forms a dependent template name given the template
/// name and its (presumably dependent) scope specifier. For
/// example, given "MetaFun::template apply", the scope specifier \p
/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
/// of the "template" keyword, and "apply" is the \p Name.
TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Result,
bool AllowInjectedClassName) {
if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent())
Diag(TemplateKWLoc,
getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_template_outside_of_template :
diag::ext_template_outside_of_template)
<< FixItHint::CreateRemoval(TemplateKWLoc);
DeclContext *LookupCtx = nullptr;
if (SS.isSet())
LookupCtx = computeDeclContext(SS, EnteringContext);
if (!LookupCtx && ObjectType)
LookupCtx = computeDeclContext(ObjectType.get());
if (LookupCtx) {
// C++0x [temp.names]p5:
// If a name prefixed by the keyword template is not the name of
// a template, the program is ill-formed. [Note: the keyword
// template may not be applied to non-template members of class
// templates. -end note ] [ Note: as is the case with the
// typename prefix, the template prefix is allowed in cases
// where it is not strictly necessary; i.e., when the
// nested-name-specifier or the expression on the left of the ->
// or . is not dependent on a template-parameter, or the use
// does not appear in the scope of a template. -end note]
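//
// For example, given a dependent T, 'T::template apply<U>' requires the
// keyword, while 'MetaFun::template apply<U>' (with MetaFun a known
// class) merely tolerates it under DR468.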
//
// Note: C++03 was more strict here, because it banned the use of
// the "template" keyword prior to a template-name that was not a
// dependent name. C++ DR468 relaxed this requirement (the
// "template" keyword is now permitted). We follow the C++0x
// rules, even in C++03 mode with a warning, retroactively applying the DR.
bool MemberOfUnknownSpecialization;
TemplateNameKind TNK = isTemplateName(S, SS, TemplateKWLoc.isValid(), Name,
ObjectType, EnteringContext, Result,
MemberOfUnknownSpecialization);
if (TNK == TNK_Non_template && MemberOfUnknownSpecialization) {
// This is a dependent template. Handle it below.
} else if (TNK == TNK_Non_template) {
// Do the lookup again to determine if this is a "nothing found" case or
// a "not a template" case. FIXME: Refactor isTemplateName so we don't
// need to do this.
DeclarationNameInfo DNI = GetNameFromUnqualifiedId(Name);
LookupResult R(*this, DNI.getName(), Name.getLocStart(),
LookupOrdinaryName);
bool MOUS;
if (!LookupTemplateName(R, S, SS, ObjectType.get(), EnteringContext,
MOUS, TemplateKWLoc))
Diag(Name.getLocStart(), diag::err_no_member)
<< DNI.getName() << LookupCtx << SS.getRange();
return TNK_Non_template;
} else {
// We found something; return it.
auto *LookupRD = dyn_cast<CXXRecordDecl>(LookupCtx);
if (!AllowInjectedClassName && SS.isSet() && LookupRD &&
Name.getKind() == UnqualifiedIdKind::IK_Identifier &&
Name.Identifier && LookupRD->getIdentifier() == Name.Identifier) {
// C++14 [class.qual]p2:
// In a lookup in which function names are not ignored and the
// nested-name-specifier nominates a class C, if the name specified
// [...] is the injected-class-name of C, [...] the name is instead
// considered to name the constructor
//
// We don't get here if naming the constructor would be valid, so we
// just reject immediately and recover by treating the
// injected-class-name as naming the template.
Diag(Name.getLocStart(),
diag::ext_out_of_line_qualified_id_type_names_constructor)
<< Name.Identifier << 0 /*injected-class-name used as template name*/
<< 1 /*'template' keyword was used*/;
}
return TNK;
}
}
NestedNameSpecifier *Qualifier = SS.getScopeRep();
switch (Name.getKind()) {
case UnqualifiedIdKind::IK_Identifier:
Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
Name.Identifier));
return TNK_Dependent_template_name;
case UnqualifiedIdKind::IK_OperatorFunctionId:
Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
Name.OperatorFunctionId.Operator));
return TNK_Function_template;
case UnqualifiedIdKind::IK_LiteralOperatorId:
llvm_unreachable("literal operator id cannot have a dependent scope");
default:
break;
}
Diag(Name.getLocStart(),
diag::err_template_kw_refers_to_non_template)
<< GetNameFromUnqualifiedId(Name).getName()
<< Name.getSourceRange()
<< TemplateKWLoc;
return TNK_Non_template;
}
bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &AL,
SmallVectorImpl<TemplateArgument> &Converted) {
const TemplateArgument &Arg = AL.getArgument();
QualType ArgType;
TypeSourceInfo *TSI = nullptr;
// Check template type parameter.
switch(Arg.getKind()) {
case TemplateArgument::Type:
// C++ [temp.arg.type]p1:
// A template-argument for a template-parameter which is a
// type shall be a type-id.
ArgType = Arg.getAsType();
TSI = AL.getTypeSourceInfo();
break;
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion: {
// We have a template type parameter but the template argument
// is a template without any arguments.
SourceRange SR = AL.getSourceRange();
TemplateName Name = Arg.getAsTemplateOrTemplatePattern();
diagnoseMissingTemplateArguments(Name, SR.getEnd());
return true;
}
case TemplateArgument::Expression: {
// We have a template type parameter but the template argument is an
// expression; see if maybe it is missing the "typename" keyword.
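// For example (an illustrative sketch):
//   template<typename T> struct A { typedef T type; };
//   template<typename T> A<A<T>::type> f(); // suggest 'typename A<T>::type'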
CXXScopeSpec SS;
DeclarationNameInfo NameInfo;
if (DeclRefExpr *ArgExpr = dyn_cast<DeclRefExpr>(Arg.getAsExpr())) {
SS.Adopt(ArgExpr->getQualifierLoc());
NameInfo = ArgExpr->getNameInfo();
} else if (DependentScopeDeclRefExpr *ArgExpr =
dyn_cast<DependentScopeDeclRefExpr>(Arg.getAsExpr())) {
SS.Adopt(ArgExpr->getQualifierLoc());
NameInfo = ArgExpr->getNameInfo();
} else if (CXXDependentScopeMemberExpr *ArgExpr =
dyn_cast<CXXDependentScopeMemberExpr>(Arg.getAsExpr())) {
if (ArgExpr->isImplicitAccess()) {
SS.Adopt(ArgExpr->getQualifierLoc());
NameInfo = ArgExpr->getMemberNameInfo();
}
}
if (auto *II = NameInfo.getName().getAsIdentifierInfo()) {
LookupResult Result(*this, NameInfo, LookupOrdinaryName);
LookupParsedName(Result, CurScope, &SS);
if (Result.getAsSingle<TypeDecl>() ||
Result.getResultKind() ==
LookupResult::NotFoundInCurrentInstantiation) {
// Suggest that the user add 'typename' before the NNS.
SourceLocation Loc = AL.getSourceRange().getBegin();
Diag(Loc, getLangOpts().MSVCCompat
? diag::ext_ms_template_type_arg_missing_typename
: diag::err_template_arg_must_be_type_suggest)
<< FixItHint::CreateInsertion(Loc, "typename ");
Diag(Param->getLocation(), diag::note_template_param_here);
// Recover by synthesizing a type using the location information that we
// already have.
ArgType =
Context.getDependentNameType(ETK_Typename, SS.getScopeRep(), II);
TypeLocBuilder TLB;
DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(ArgType);
TL.setElaboratedKeywordLoc(SourceLocation(/*synthesized*/));
TL.setQualifierLoc(SS.getWithLocInContext(Context));
TL.setNameLoc(NameInfo.getLoc());
TSI = TLB.getTypeSourceInfo(Context, ArgType);
// Overwrite our input TemplateArgumentLoc so that we can recover
// properly.
AL = TemplateArgumentLoc(TemplateArgument(ArgType),
TemplateArgumentLocInfo(TSI));
break;
}
}
// fallthrough
LLVM_FALLTHROUGH;
}
default: {
// We have a template type parameter but the template argument
// is not a type.
SourceRange SR = AL.getSourceRange();
Diag(SR.getBegin(), diag::err_template_arg_must_be_type) << SR;
Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
}
if (CheckTemplateArgument(Param, TSI))
return true;
// Add the converted template type argument.
ArgType = Context.getCanonicalType(ArgType);
// Objective-C ARC:
// If an explicitly-specified template argument type is a lifetime type
// with no lifetime qualifier, the __strong lifetime qualifier is inferred.
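// For example (an illustrative sketch): under ARC, 'X<NSString *>' is
// treated as 'X<__strong NSString *>'.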
if (getLangOpts().ObjCAutoRefCount &&
ArgType->isObjCLifetimeType() &&
!ArgType.getObjCLifetime()) {
Qualifiers Qs;
Qs.setObjCLifetime(Qualifiers::OCL_Strong);
ArgType = Context.getQualifiedType(ArgType, Qs);
}
Converted.push_back(TemplateArgument(ArgType));
return false;
}
/// Substitute template arguments into the default template argument for
/// the given template type parameter.
///
/// \param SemaRef the semantic analysis object for which we are performing
/// the substitution.
///
/// \param Template the template that we are synthesizing template arguments
/// for.
///
/// \param TemplateLoc the location of the template name that started the
/// template-id we are checking.
///
/// \param RAngleLoc the location of the right angle bracket ('>') that
/// terminates the template-id.
///
/// \param Param the template type parameter whose default we are
/// substituting into.
///
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
/// \returns the substituted template argument, or NULL if an error occurred.
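///
/// For example (an illustrative sketch):
/// \code
///   template<typename T, typename U = T*> struct X {};
///   X<int> x; // the default argument 'T*' is substituted to give 'int*'
/// \endcode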
static TypeSourceInfo *
SubstDefaultTemplateArgument(Sema &SemaRef,
TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
TemplateTypeParmDecl *Param,
SmallVectorImpl<TemplateArgument> &Converted) {
TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
if (ArgType->getType()->isDependentType()) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
Param, Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return nullptr;
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists;
TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
TemplateArgLists.addOuterTemplateArguments(None);
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
ArgType =
SemaRef.SubstType(ArgType, TemplateArgLists,
Param->getDefaultArgumentLoc(), Param->getDeclName());
}
return ArgType;
}
/// Substitute template arguments into the default template argument for
/// the given non-type template parameter.
///
/// \param SemaRef the semantic analysis object for which we are performing
/// the substitution.
///
/// \param Template the template that we are synthesizing template arguments
/// for.
///
/// \param TemplateLoc the location of the template name that started the
/// template-id we are checking.
///
/// \param RAngleLoc the location of the right angle bracket ('>') that
/// terminates the template-id.
///
/// \param Param the non-type template parameter whose default we are
/// substituting into.
///
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
///
/// \returns the substituted template argument, or NULL if an error occurred.
static ExprResult
SubstDefaultTemplateArgument(Sema &SemaRef,
TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
NonTypeTemplateParmDecl *Param,
SmallVectorImpl<TemplateArgument> &Converted) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
Param, Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return ExprError();
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists;
TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
TemplateArgLists.addOuterTemplateArguments(None);
EnterExpressionEvaluationContext ConstantEvaluated(
SemaRef, Sema::ExpressionEvaluationContext::ConstantEvaluated);
return SemaRef.SubstExpr(Param->getDefaultArgument(), TemplateArgLists);
}
/// Substitute template arguments into the default template argument for
/// the given template template parameter.
///
/// \param SemaRef the semantic analysis object for which we are performing
/// the substitution.
///
/// \param Template the template that we are synthesizing template arguments
/// for.
///
/// \param TemplateLoc the location of the template name that started the
/// template-id we are checking.
///
/// \param RAngleLoc the location of the right angle bracket ('>') that
/// terminates the template-id.
///
/// \param Param the template template parameter whose default we are
/// substituting into.
///
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
///
/// \param QualifierLoc Will be set to the nested-name-specifier (with
/// source-location information) that precedes the template name.
///
/// \returns the substituted template argument, or NULL if an error occurred.
static TemplateName
SubstDefaultTemplateArgument(Sema &SemaRef,
TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
TemplateTemplateParmDecl *Param,
SmallVectorImpl<TemplateArgument> &Converted,
NestedNameSpecifierLoc &QualifierLoc) {
Sema::InstantiatingTemplate Inst(
SemaRef, TemplateLoc, TemplateParameter(Param), Template, Converted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return TemplateName();
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists;
TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
for (unsigned i = 0, e = Param->getDepth(); i != e; ++i)
TemplateArgLists.addOuterTemplateArguments(None);
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext());
// Substitute into the nested-name-specifier first, then the template name.
QualifierLoc = Param->getDefaultArgument().getTemplateQualifierLoc();
if (QualifierLoc) {
QualifierLoc =
SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc, TemplateArgLists);
if (!QualifierLoc)
return TemplateName();
}
return SemaRef.SubstTemplateName(
QualifierLoc,
Param->getDefaultArgument().getArgument().getAsTemplate(),
Param->getDefaultArgument().getTemplateNameLoc(),
TemplateArgLists);
}
/// If the given template parameter has a default template
/// argument, substitute into that default template argument and
/// return the corresponding template argument.
TemplateArgumentLoc
Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg) {
HasDefaultArg = false;
if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) {
if (!hasVisibleDefaultArgument(TypeParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
TypeSourceInfo *DI = SubstDefaultTemplateArgument(*this, Template,
TemplateLoc,
RAngleLoc,
TypeParm,
Converted);
if (DI)
return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
return TemplateArgumentLoc();
}
if (NonTypeTemplateParmDecl *NonTypeParm
= dyn_cast<NonTypeTemplateParmDecl>(Param)) {
if (!hasVisibleDefaultArgument(NonTypeParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
ExprResult Arg = SubstDefaultTemplateArgument(*this, Template,
TemplateLoc,
RAngleLoc,
NonTypeParm,
Converted);
if (Arg.isInvalid())
return TemplateArgumentLoc();
Expr *ArgE = Arg.getAs<Expr>();
return TemplateArgumentLoc(TemplateArgument(ArgE), ArgE);
}
TemplateTemplateParmDecl *TempTempParm
= cast<TemplateTemplateParmDecl>(Param);
if (!hasVisibleDefaultArgument(TempTempParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
NestedNameSpecifierLoc QualifierLoc;
TemplateName TName = SubstDefaultTemplateArgument(*this, Template,
TemplateLoc,
RAngleLoc,
TempTempParm,
Converted,
QualifierLoc);
if (TName.isNull())
return TemplateArgumentLoc();
return TemplateArgumentLoc(TemplateArgument(TName),
TempTempParm->getDefaultArgument().getTemplateQualifierLoc(),
TempTempParm->getDefaultArgument().getTemplateNameLoc());
}
/// Convert a template-argument that we parsed as a type into a template, if
/// possible. C++ permits injected-class-names to perform dual service as
/// template template arguments and as template type arguments.
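///
/// For example (an illustrative sketch):
/// \code
///   template<typename T> struct A {
///     template<template<typename> class TT> struct B {};
///     B<A> b; // OK: the injected-class-name 'A' names the template itself
///   };
/// \endcode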
static TemplateArgumentLoc convertTypeTemplateArgumentToTemplate(TypeLoc TLoc) {
// Extract and step over any surrounding nested-name-specifier.
NestedNameSpecifierLoc QualLoc;
if (auto ETLoc = TLoc.getAs<ElaboratedTypeLoc>()) {
if (ETLoc.getTypePtr()->getKeyword() != ETK_None)
return TemplateArgumentLoc();
QualLoc = ETLoc.getQualifierLoc();
TLoc = ETLoc.getNamedTypeLoc();
}
// If this type was written as an injected-class-name, it can be used as a
// template template argument.
if (auto InjLoc = TLoc.getAs<InjectedClassNameTypeLoc>())
return TemplateArgumentLoc(InjLoc.getTypePtr()->getTemplateName(),
QualLoc, InjLoc.getNameLoc());
// If this type was written as an injected-class-name, it may have been
// converted to a RecordType during instantiation. If the RecordType is
// *not* wrapped in a TemplateSpecializationType and denotes a class
// template specialization, it must have come from an injected-class-name.
if (auto RecLoc = TLoc.getAs<RecordTypeLoc>())
if (auto *CTSD =
dyn_cast<ClassTemplateSpecializationDecl>(RecLoc.getDecl()))
return TemplateArgumentLoc(TemplateName(CTSD->getSpecializedTemplate()),
QualLoc, RecLoc.getNameLoc());
return TemplateArgumentLoc();
}
/// Check that the given template argument corresponds to the given
/// template parameter.
///
/// \param Param The template parameter against which the argument will be
/// checked.
///
/// \param Arg The template argument, which may be updated due to conversions.
///
/// \param Template The template in which the template argument resides.
///
/// \param TemplateLoc The location of the template name for the template
/// whose argument list we're matching.
///
/// \param RAngleLoc The location of the right angle bracket ('>') that closes
/// the template argument list.
///
/// \param ArgumentPackIndex The index into the argument pack where this
/// argument will be placed. Only valid if the parameter is a parameter pack.
///
/// \param Converted The checked, converted argument will be added to the
/// end of this small vector.
///
/// \param CTAK Describes how we arrived at this particular template argument:
/// explicitly written, deduced, etc.
///
/// \returns true on error, false otherwise.
bool Sema::CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK) {
// Check template type parameters.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
return CheckTemplateTypeArgument(TTP, Arg, Converted);
// Check non-type template parameters.
if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
// Do substitution on the type of the non-type template parameter
// with the template arguments we've seen thus far. But if the
// template has a dependent context then we cannot substitute yet.
QualType NTTPType = NTTP->getType();
if (NTTP->isParameterPack() && NTTP->isExpandedParameterPack())
NTTPType = NTTP->getExpansionType(ArgumentPackIndex);
// FIXME: Do we need to substitute into parameters here if they're
// instantiation-dependent but not dependent?
if (NTTPType->isDependentType() &&
!isa<TemplateTemplateParmDecl>(Template) &&
!Template->getDeclContext()->isDependentContext()) {
// Do substitution on the type of the non-type template parameter.
InstantiatingTemplate Inst(*this, TemplateLoc, Template,
NTTP, Converted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return true;
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
Converted);
NTTPType = SubstType(NTTPType,
MultiLevelTemplateArgumentList(TemplateArgs),
NTTP->getLocation(),
NTTP->getDeclName());
// If that worked, check the non-type template parameter type
// for validity.
if (!NTTPType.isNull())
NTTPType = CheckNonTypeTemplateParameterType(NTTPType,
NTTP->getLocation());
if (NTTPType.isNull())
return true;
}
switch (Arg.getArgument().getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Should never see a NULL template argument here");
case TemplateArgument::Expression: {
TemplateArgument Result;
unsigned CurSFINAEErrors = NumSFINAEErrors;
ExprResult Res =
CheckTemplateArgument(NTTP, NTTPType, Arg.getArgument().getAsExpr(),
Result, CTAK);
if (Res.isInvalid())
return true;
// If the current template argument causes an error, give up now.
if (CurSFINAEErrors < NumSFINAEErrors)
return true;
// If the resulting expression is new, then use it in place of the
// old expression in the template argument.
if (Res.get() != Arg.getArgument().getAsExpr()) {
TemplateArgument TA(Res.get());
Arg = TemplateArgumentLoc(TA, Res.get());
}
Converted.push_back(Result);
break;
}
case TemplateArgument::Declaration:
case TemplateArgument::Integral:
case TemplateArgument::NullPtr:
// We've already checked this template argument, so just copy
// it to the list of converted arguments.
Converted.push_back(Arg.getArgument());
break;
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
// We were given a template template argument. It might not be ill-formed;
// see below.
if (DependentTemplateName *DTN
= Arg.getArgument().getAsTemplateOrTemplatePattern()
.getAsDependentTemplateName()) {
// We have a template argument such as \c T::template X, which we
// parsed as a template template argument. However, since we now
// know that we need a non-type template argument, convert this
// template name into an expression.
DeclarationNameInfo NameInfo(DTN->getIdentifier(),
Arg.getTemplateNameLoc());
CXXScopeSpec SS;
SS.Adopt(Arg.getTemplateQualifierLoc());
// FIXME: the template-template arg was a DependentTemplateName,
// so it was provided with a template keyword. However, its source
// location is not stored in the template argument structure.
SourceLocation TemplateKWLoc;
ExprResult E = DependentScopeDeclRefExpr::Create(
Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
nullptr);
// If we parsed the template argument as a pack expansion, create a
// pack expansion expression.
if (Arg.getArgument().getKind() == TemplateArgument::TemplateExpansion) {
E = ActOnPackExpansion(E.get(), Arg.getTemplateEllipsisLoc());
if (E.isInvalid())
return true;
}
TemplateArgument Result;
E = CheckTemplateArgument(NTTP, NTTPType, E.get(), Result);
if (E.isInvalid())
return true;
Converted.push_back(Result);
break;
}
// We have a template argument that actually does refer to a class
// template, alias template, or template template parameter, and
// therefore cannot be a non-type template argument.
Diag(Arg.getLocation(), diag::err_template_arg_must_be_expr)
<< Arg.getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return true;
case TemplateArgument::Type: {
// We have a non-type template parameter but the template
// argument is a type.
// C++ [temp.arg]p2:
// In a template-argument, an ambiguity between a type-id and
// an expression is resolved to a type-id, regardless of the
// form of the corresponding template-parameter.
//
// We diagnose this case specifically, since it can be rather
// confusing for users.
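//
// For example (an illustrative sketch):
//   template<int N> struct X {};
//   X<int()> x; // 'int()' is parsed as a function type, not as a
//               // value-initialized int expression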
QualType T = Arg.getArgument().getAsType();
SourceRange SR = Arg.getSourceRange();
if (T->isFunctionType())
Diag(SR.getBegin(), diag::err_template_arg_nontype_ambig) << SR << T;
else
Diag(SR.getBegin(), diag::err_template_arg_must_be_expr) << SR;
Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
}
return false;
}
// Check template template parameters.
TemplateTemplateParmDecl *TempParm = cast<TemplateTemplateParmDecl>(Param);
TemplateParameterList *Params = TempParm->getTemplateParameters();
if (TempParm->isExpandedParameterPack())
Params = TempParm->getExpansionTemplateParameters(ArgumentPackIndex);
// Substitute into the template parameter list of the template
// template parameter, since previously-supplied template arguments
// may appear within the template template parameter.
//
// FIXME: Skip this if the parameters aren't instantiation-dependent.
{
// Set up a template instantiation context.
LocalInstantiationScope Scope(*this);
InstantiatingTemplate Inst(*this, TemplateLoc, Template,
TempParm, Converted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return true;
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Converted);
Params = SubstTemplateParams(Params, CurContext,
MultiLevelTemplateArgumentList(TemplateArgs));
if (!Params)
return true;
}
// C++1z [temp.local]p1: (DR1004)
// When [the injected-class-name] is used [...] as a template-argument for
// a template template-parameter [...] it refers to the class template
// itself.
if (Arg.getArgument().getKind() == TemplateArgument::Type) {
TemplateArgumentLoc ConvertedArg = convertTypeTemplateArgumentToTemplate(
Arg.getTypeSourceInfo()->getTypeLoc());
if (!ConvertedArg.getArgument().isNull())
Arg = ConvertedArg;
}
switch (Arg.getArgument().getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Should never see a NULL template argument here");
case TemplateArgument::Template:
case TemplateArgument::TemplateExpansion:
if (CheckTemplateTemplateArgument(Params, Arg))
return true;
Converted.push_back(Arg.getArgument());
break;
case TemplateArgument::Expression:
case TemplateArgument::Type:
// We have a template template parameter but the template
// argument does not refer to a template.
Diag(Arg.getLocation(), diag::err_template_arg_must_be_template)
<< getLangOpts().CPlusPlus11;
return true;
case TemplateArgument::Declaration:
llvm_unreachable("Declaration argument with template template parameter");
case TemplateArgument::Integral:
llvm_unreachable("Integral argument with template template parameter");
case TemplateArgument::NullPtr:
llvm_unreachable("Null pointer argument with template template parameter");
case TemplateArgument::Pack:
llvm_unreachable("Caller must expand template argument packs");
}
return false;
}
/// Check whether the template parameter is a pack expansion, and if so,
/// determine the number of parameters produced by that expansion. For instance:
///
/// \code
/// template<typename ...Ts> struct A {
/// template<Ts ...NTs, template<Ts> class ...TTs, typename ...Us> struct B;
/// };
/// \endcode
///
/// In \c A<int,int>::B, \c NTs and \c TTs have expanded pack size 2, and \c Us
/// is not an expanded parameter pack, so this returns an empty Optional for it.
static Optional<unsigned> getExpandedPackSize(NamedDecl *Param) {
if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(Param)) {
if (NTTP->isExpandedParameterPack())
return NTTP->getNumExpansionTypes();
}
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(Param)) {
if (TTP->isExpandedParameterPack())
return TTP->getNumExpansionTemplateParameters();
}
return None;
}
/// Diagnose a missing template argument.
template<typename TemplateParmDecl>
static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
TemplateDecl *TD,
const TemplateParmDecl *D,
TemplateArgumentListInfo &Args) {
// Dig out the most recent declaration of the template parameter; there may be
// declarations of the template that are more recent than TD.
D = cast<TemplateParmDecl>(cast<TemplateDecl>(TD->getMostRecentDecl())
->getTemplateParameters()
->getParam(D->getIndex()));
// If there's a default argument that's not visible, diagnose that we're
// missing a module import.
llvm::SmallVector<Module*, 8> Modules;
if (D->hasDefaultArgument() && !S.hasVisibleDefaultArgument(D, &Modules)) {
S.diagnoseMissingImport(Loc, cast<NamedDecl>(TD),
D->getDefaultArgumentLoc(), Modules,
Sema::MissingImportKind::DefaultArgument,
/*Recover*/true);
return true;
}
// FIXME: If there's a more recent default argument that *is* visible,
// diagnose that it was declared too late.
TemplateParameterList *Params = TD->getTemplateParameters();
S.Diag(Loc, diag::err_template_arg_list_different_arity)
<< /*not enough args*/0
<< (int)S.getTemplateNameKindForDiagnostics(TemplateName(TD))
<< TD;
S.Diag(TD->getLocation(), diag::note_template_decl_here)
<< Params->getSourceRange();
return true;
}
/// Check that the given template argument list is well-formed
/// for specializing the given template.
bool Sema::CheckTemplateArgumentList(
TemplateDecl *Template, SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions) {
// Make a copy of the template arguments for processing. Only commit the
// changes at the end, once we have successfully matched the arguments to
// the template.
TemplateArgumentListInfo NewArgs = TemplateArgs;
// Make sure we get the template parameter list from the most recent
// declaration, since that is the only one that is guaranteed to have
// all the default template argument information.
TemplateParameterList *Params =
cast<TemplateDecl>(Template->getMostRecentDecl())
->getTemplateParameters();
SourceLocation RAngleLoc = NewArgs.getRAngleLoc();
// C++ [temp.arg]p1:
// [...] The type and form of each template-argument specified in
// a template-id shall match the type and form specified for the
// corresponding parameter declared by the template in its
// template-parameter-list.
bool isTemplateTemplateParameter = isa<TemplateTemplateParmDecl>(Template);
SmallVector<TemplateArgument, 2> ArgumentPack;
unsigned ArgIdx = 0, NumArgs = NewArgs.size();
LocalInstantiationScope InstScope(*this, true);
for (TemplateParameterList::iterator Param = Params->begin(),
ParamEnd = Params->end();
Param != ParamEnd; /* increment in loop */) {
// If we have an expanded parameter pack, make sure we don't have too
// many arguments.
if (Optional<unsigned> Expansions = getExpandedPackSize(*Param)) {
if (*Expansions == ArgumentPack.size()) {
// We're done with this parameter pack. Pack up its arguments and add
// them to the list.
Converted.push_back(
TemplateArgument::CreatePackCopy(Context, ArgumentPack));
ArgumentPack.clear();
// This argument is assigned to the next parameter.
++Param;
continue;
} else if (ArgIdx == NumArgs && !PartialTemplateArgs) {
// Not enough arguments for this parameter pack.
Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
<< /*not enough args*/0
<< (int)getTemplateNameKindForDiagnostics(TemplateName(Template))
<< Template;
Diag(Template->getLocation(), diag::note_template_decl_here)
<< Params->getSourceRange();
return true;
}
}
if (ArgIdx < NumArgs) {
// Check the template argument we were given.
if (CheckTemplateArgument(*Param, NewArgs[ArgIdx], Template,
TemplateLoc, RAngleLoc,
ArgumentPack.size(), Converted))
return true;
bool PackExpansionIntoNonPack =
NewArgs[ArgIdx].getArgument().isPackExpansion() &&
(!(*Param)->isTemplateParameterPack() || getExpandedPackSize(*Param));
if (PackExpansionIntoNonPack && isa<TypeAliasTemplateDecl>(Template)) {
// Core issue 1430: we have a pack expansion as an argument to an
// alias template, and it's not part of a parameter pack. This
// can't be canonicalized, so reject it now.
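// For example (an illustrative sketch, with 'Pair' a hypothetical class
// template taking two type parameters):
//   template<typename T, typename U> using P = Pair<T, U>;
//   template<typename ...Ts> void f(P<Ts...>); // rejected: 'Ts...' would
//   // expand into P's fixed parameter list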
Diag(NewArgs[ArgIdx].getLocation(),
diag::err_alias_template_expansion_into_fixed_list)
<< NewArgs[ArgIdx].getSourceRange();
Diag((*Param)->getLocation(), diag::note_template_param_here);
return true;
}
// We're now done with this argument.
++ArgIdx;
if ((*Param)->isTemplateParameterPack()) {
// The template parameter was a template parameter pack, so take the
// deduced argument and place it on the argument pack. Note that we
// stay on the same template parameter so that we can deduce more
// arguments.
ArgumentPack.push_back(Converted.pop_back_val());
} else {
// Move to the next template parameter.
++Param;
}
// If we just saw a pack expansion into a non-pack, then directly convert
// the remaining arguments, because we don't know what parameters they'll
// match up with.
if (PackExpansionIntoNonPack) {
if (!ArgumentPack.empty()) {
// If we were part way through filling in an expanded parameter pack,
// fall back to just producing individual arguments.
Converted.insert(Converted.end(),
ArgumentPack.begin(), ArgumentPack.end());
ArgumentPack.clear();
}
while (ArgIdx < NumArgs) {
Converted.push_back(NewArgs[ArgIdx].getArgument());
++ArgIdx;
}
return false;
}
continue;
}
// If we're checking a partial template argument list, we're done.
if (PartialTemplateArgs) {
if ((*Param)->isTemplateParameterPack() && !ArgumentPack.empty())
Converted.push_back(
TemplateArgument::CreatePackCopy(Context, ArgumentPack));
return false;
}
// If we have a template parameter pack with no more corresponding
// arguments, just break out now and we'll fill in the argument pack below.
if ((*Param)->isTemplateParameterPack()) {
assert(!getExpandedPackSize(*Param) &&
"Should have dealt with this already");
// A non-expanded parameter pack before the end of the parameter list
// only occurs for an ill-formed template parameter list, unless we've
// got a partial argument list for a function template, so just bail out.
if (Param + 1 != ParamEnd)
return true;
Converted.push_back(
TemplateArgument::CreatePackCopy(Context, ArgumentPack));
ArgumentPack.clear();
++Param;
continue;
}
// Check whether we have a default argument.
TemplateArgumentLoc Arg;
// Retrieve the default template argument from the template
// parameter. For each kind of template parameter, we substitute the
// template arguments provided thus far and any "outer" template arguments
// (when the template parameter was part of a nested template) into
// the default argument.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
if (!hasVisibleDefaultArgument(TTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
NewArgs);
TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
Template,
TemplateLoc,
RAngleLoc,
TTP,
Converted);
if (!ArgType)
return true;
Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
if (!hasVisibleDefaultArgument(NTTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, NTTP,
NewArgs);
ExprResult E = SubstDefaultTemplateArgument(*this, Template,
TemplateLoc,
RAngleLoc,
NTTP,
Converted);
if (E.isInvalid())
return true;
Expr *Ex = E.getAs<Expr>();
Arg = TemplateArgumentLoc(TemplateArgument(Ex), Ex);
} else {
TemplateTemplateParmDecl *TempParm
= cast<TemplateTemplateParmDecl>(*Param);
if (!hasVisibleDefaultArgument(TempParm))
return diagnoseMissingArgument(*this, TemplateLoc, Template, TempParm,
NewArgs);
NestedNameSpecifierLoc QualifierLoc;
TemplateName Name = SubstDefaultTemplateArgument(*this, Template,
TemplateLoc,
RAngleLoc,
TempParm,
Converted,
QualifierLoc);
if (Name.isNull())
return true;
Arg = TemplateArgumentLoc(TemplateArgument(Name), QualifierLoc,
TempParm->getDefaultArgument().getTemplateNameLoc());
}
// Introduce an instantiation record that describes where we are using
// the default template argument. We're not actually instantiating a
// template here; we just create this object to put a note into the
// context stack.
InstantiatingTemplate Inst(*this, RAngleLoc, Template, *Param, Converted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
return true;
// Check the default template argument.
if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc,
RAngleLoc, 0, Converted))
return true;
// Core issue 150 (assumed resolution): if this is a template template
// parameter, keep track of the default template arguments from the
// template definition.
if (isTemplateTemplateParameter)
NewArgs.addArgument(Arg);
// Move to the next template parameter and argument.
++Param;
++ArgIdx;
}
// If we're performing a partial argument substitution, allow any trailing
// pack expansions; they might be empty. This can happen even if
// PartialTemplateArgs is false (the list of arguments is complete but
// still dependent).
if (ArgIdx < NumArgs && CurrentInstantiationScope &&
CurrentInstantiationScope->getPartiallySubstitutedPack()) {
while (ArgIdx < NumArgs && NewArgs[ArgIdx].getArgument().isPackExpansion())
Converted.push_back(NewArgs[ArgIdx++].getArgument());
}
// If we have any leftover arguments, then there were too many arguments.
// Complain and fail.
if (ArgIdx < NumArgs) {
Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
<< /*too many args*/1
<< (int)getTemplateNameKindForDiagnostics(TemplateName(Template))
<< Template
<< SourceRange(NewArgs[ArgIdx].getLocation(), NewArgs.getRAngleLoc());
Diag(Template->getLocation(), diag::note_template_decl_here)
<< Params->getSourceRange();
return true;
}
// No problems found with the new argument list, propagate changes back
// to caller.
if (UpdateArgsWithConversions)
TemplateArgs = std::move(NewArgs);
return false;
}
namespace {
class UnnamedLocalNoLinkageFinder
: public TypeVisitor<UnnamedLocalNoLinkageFinder, bool>
{
Sema &S;
SourceRange SR;
typedef TypeVisitor<UnnamedLocalNoLinkageFinder, bool> inherited;
public:
UnnamedLocalNoLinkageFinder(Sema &S, SourceRange SR) : S(S), SR(SR) { }
bool Visit(QualType T) {
return T.isNull() ? false : inherited::Visit(T.getTypePtr());
}
#define TYPE(Class, Parent) \
bool Visit##Class##Type(const Class##Type *);
#define ABSTRACT_TYPE(Class, Parent) \
bool Visit##Class##Type(const Class##Type *) { return false; }
#define NON_CANONICAL_TYPE(Class, Parent) \
bool Visit##Class##Type(const Class##Type *) { return false; }
#include "clang/AST/TypeNodes.def"
bool VisitTagDecl(const TagDecl *Tag);
bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
};
} // end anonymous namespace
bool UnnamedLocalNoLinkageFinder::VisitBuiltinType(const BuiltinType*) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitComplexType(const ComplexType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitPointerType(const PointerType* T) {
return Visit(T->getPointeeType());
}
bool UnnamedLocalNoLinkageFinder::VisitBlockPointerType(
const BlockPointerType* T) {
return Visit(T->getPointeeType());
}
bool UnnamedLocalNoLinkageFinder::VisitLValueReferenceType(
const LValueReferenceType* T) {
return Visit(T->getPointeeType());
}
bool UnnamedLocalNoLinkageFinder::VisitRValueReferenceType(
const RValueReferenceType* T) {
return Visit(T->getPointeeType());
}
bool UnnamedLocalNoLinkageFinder::VisitMemberPointerType(
const MemberPointerType* T) {
return Visit(T->getPointeeType()) || Visit(QualType(T->getClass(), 0));
}
bool UnnamedLocalNoLinkageFinder::VisitConstantArrayType(
const ConstantArrayType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitIncompleteArrayType(
const IncompleteArrayType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitVariableArrayType(
const VariableArrayType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentSizedArrayType(
const DependentSizedArrayType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentSizedExtVectorType(
const DependentSizedExtVectorType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentAddressSpaceType(
const DependentAddressSpaceType *T) {
return Visit(T->getPointeeType());
}
bool UnnamedLocalNoLinkageFinder::VisitVectorType(const VectorType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentVectorType(
const DependentVectorType *T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
return Visit(T->getElementType());
}
bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
const FunctionProtoType* T) {
for (const auto &A : T->param_types()) {
if (Visit(A))
return true;
}
return Visit(T->getReturnType());
}
bool UnnamedLocalNoLinkageFinder::VisitFunctionNoProtoType(
const FunctionNoProtoType* T) {
return Visit(T->getReturnType());
}
bool UnnamedLocalNoLinkageFinder::VisitUnresolvedUsingType(
const UnresolvedUsingType*) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitTypeOfExprType(const TypeOfExprType*) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitTypeOfType(const TypeOfType* T) {
return Visit(T->getUnderlyingType());
}
bool UnnamedLocalNoLinkageFinder::VisitDecltypeType(const DecltypeType*) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitUnaryTransformType(
const UnaryTransformType*) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitAutoType(const AutoType *T) {
return Visit(T->getDeducedType());
}
bool UnnamedLocalNoLinkageFinder::VisitDeducedTemplateSpecializationType(
const DeducedTemplateSpecializationType *T) {
return Visit(T->getDeducedType());
}
bool UnnamedLocalNoLinkageFinder::VisitRecordType(const RecordType* T) {
return VisitTagDecl(T->getDecl());
}
bool UnnamedLocalNoLinkageFinder::VisitEnumType(const EnumType* T) {
return VisitTagDecl(T->getDecl());
}
bool UnnamedLocalNoLinkageFinder::VisitTemplateTypeParmType(
const TemplateTypeParmType*) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitSubstTemplateTypeParmPackType(
const SubstTemplateTypeParmPackType *) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
const TemplateSpecializationType*) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitInjectedClassNameType(
const InjectedClassNameType* T) {
return VisitTagDecl(T->getDecl());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
const DependentNameType* T) {
return VisitNestedNameSpecifier(T->getQualifier());
}
bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType* T) {
return VisitNestedNameSpecifier(T->getQualifier());
}
bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
const PackExpansionType* T) {
return Visit(T->getPattern());
}
bool UnnamedLocalNoLinkageFinder::VisitObjCObjectType(const ObjCObjectType *) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitObjCInterfaceType(
const ObjCInterfaceType *) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitObjCObjectPointerType(
const ObjCObjectPointerType *) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitAtomicType(const AtomicType* T) {
return Visit(T->getValueType());
}
bool UnnamedLocalNoLinkageFinder::VisitPipeType(const PipeType* T) {
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
if (Tag->getDeclContext()->isFunctionOrMethod()) {
S.Diag(SR.getBegin(),
S.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_template_arg_local_type :
diag::ext_template_arg_local_type)
<< S.Context.getTypeDeclType(Tag) << SR;
return true;
}
if (!Tag->hasNameForLinkage()) {
S.Diag(SR.getBegin(),
S.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_template_arg_unnamed_type :
diag::ext_template_arg_unnamed_type) << SR;
S.Diag(Tag->getLocation(), diag::note_template_unnamed_type_here);
return true;
}
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
NestedNameSpecifier *NNS) {
if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
return true;
switch (NNS->getKind()) {
case NestedNameSpecifier::Identifier:
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Global:
case NestedNameSpecifier::Super:
return false;
case NestedNameSpecifier::TypeSpec:
case NestedNameSpecifier::TypeSpecWithTemplate:
return Visit(QualType(NNS->getAsType(), 0));
}
llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
}
/// Check a template argument against its corresponding
/// template type parameter.
///
/// This routine implements the semantics of C++ [temp.arg.type]. It
/// returns true if an error occurred, and false otherwise.
bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *ArgInfo) {
assert(ArgInfo && "invalid TypeSourceInfo");
QualType Arg = ArgInfo->getType();
SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();
if (Arg->isVariablyModifiedType()) {
return Diag(SR.getBegin(), diag::err_variably_modified_template_arg) << Arg;
} else if (Context.hasSameUnqualifiedType(Arg, Context.OverloadTy)) {
return Diag(SR.getBegin(), diag::err_template_arg_overload_type) << SR;
}
// C++03 [temp.arg.type]p2:
// A local type, a type with no linkage, an unnamed type or a type
// compounded from any of these types shall not be used as a
// template-argument for a template type-parameter.
//
// C++11 allows these, and even in C++03 we allow them as an extension with
// a warning.
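//
// For example (an illustrative sketch, with 'X' a hypothetical class
// template):
//   void f() {
//     struct Local {};
//     X<Local> x; // OK in C++11; accepted with a warning in C++03
//   }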
if (LangOpts.CPlusPlus11 || Arg->hasUnnamedOrLocalType()) {
UnnamedLocalNoLinkageFinder Finder(*this, SR);
(void)Finder.Visit(Context.getCanonicalType(Arg));
}
return false;
}
enum NullPointerValueKind {
NPV_NotNullPointer,
NPV_NullPointer,
NPV_Error
};
/// Determine whether the given template argument is a null pointer
/// value of the appropriate type.
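///
/// For example (an illustrative sketch): given
/// 'template<int *P> struct X {};', both 'X<nullptr>' and 'X<(int*)0>'
/// supply null pointer values in C++11.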
static NullPointerValueKind
isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
QualType ParamType, Expr *Arg,
Decl *Entity = nullptr) {
if (Arg->isValueDependent() || Arg->isTypeDependent())
return NPV_NotNullPointer;
// dllimport'd entities aren't constant but are available inside of template
// arguments.
if (Entity && Entity->hasAttr<DLLImportAttr>())
return NPV_NotNullPointer;
if (!S.isCompleteType(Arg->getExprLoc(), ParamType))
llvm_unreachable(
"Incomplete parameter type in isNullPointerValueTemplateArgument!");
if (!S.getLangOpts().CPlusPlus11)
return NPV_NotNullPointer;
// Determine whether we have a constant expression.
ExprResult ArgRV = S.DefaultFunctionArrayConversion(Arg);
if (ArgRV.isInvalid())
return NPV_Error;
Arg = ArgRV.get();
Expr::EvalResult EvalResult;
SmallVector<PartialDiagnosticAt, 8> Notes;
EvalResult.Diag = &Notes;
if (!Arg->EvaluateAsRValue(EvalResult, S.Context) ||
EvalResult.HasSideEffects) {
SourceLocation DiagLoc = Arg->getExprLoc();
// If our only note is the usual "invalid subexpression" note, just point
// the caret at its location rather than producing an essentially
// redundant note.
if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
diag::note_invalid_subexpr_in_const_expr) {
DiagLoc = Notes[0].first;
Notes.clear();
}
S.Diag(DiagLoc, diag::err_template_arg_not_address_constant)
<< Arg->getType() << Arg->getSourceRange();
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
S.Diag(Notes[I].first, Notes[I].second);
S.Diag(Param->getLocation(), diag::note_template_param_here);
return NPV_Error;
}
// C++11 [temp.arg.nontype]p1:
// - an address constant expression of type std::nullptr_t
if (Arg->getType()->isNullPtrType())
return NPV_NullPointer;
// - a constant expression that evaluates to a null pointer value (4.10); or
// - a constant expression that evaluates to a null member pointer value
// (4.11); or
if ((EvalResult.Val.isLValue() && !EvalResult.Val.getLValueBase()) ||
(EvalResult.Val.isMemberPointer() &&
!EvalResult.Val.getMemberPointerDecl())) {
// If our expression has an appropriate type, we've succeeded.
bool ObjCLifetimeConversion;
if (S.Context.hasSameUnqualifiedType(Arg->getType(), ParamType) ||
S.IsQualificationConversion(Arg->getType(), ParamType, false,
ObjCLifetimeConversion))
return NPV_NullPointer;
// The types didn't match, but we know we got a null pointer; complain,
// then recover as if the types were correct.
S.Diag(Arg->getExprLoc(), diag::err_template_arg_wrongtype_null_constant)
<< Arg->getType() << ParamType << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return NPV_NullPointer;
}
// If we don't have a null pointer value, but we do have a NULL pointer
// constant, suggest a cast to the appropriate type.
if (Arg->isNullPointerConstant(S.Context, Expr::NPC_NeverValueDependent)) {
std::string Code = "static_cast<" + ParamType.getAsString() + ">(";
S.Diag(Arg->getExprLoc(), diag::err_template_arg_untyped_null_constant)
<< ParamType << FixItHint::CreateInsertion(Arg->getLocStart(), Code)
<< FixItHint::CreateInsertion(S.getLocForEndOfToken(Arg->getLocEnd()),
")");
S.Diag(Param->getLocation(), diag::note_template_param_here);
return NPV_NullPointer;
}
// FIXME: If we ever want to support general, address-constant expressions
// as non-type template arguments, we should return the ExprResult here to
// be interpreted by the caller.
return NPV_NotNullPointer;
}
/// Checks whether the given template argument is compatible with its
/// template parameter.
static bool CheckTemplateArgumentIsCompatibleWithParameter(
Sema &S, NonTypeTemplateParmDecl *Param, QualType ParamType, Expr *ArgIn,
Expr *Arg, QualType ArgType) {
bool ObjCLifetimeConversion;
if (ParamType->isPointerType() &&
!ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType() &&
S.IsQualificationConversion(ArgType, ParamType, false,
ObjCLifetimeConversion)) {
// For pointer-to-object types, qualification conversions are
// permitted.
} else {
if (const ReferenceType *ParamRef = ParamType->getAs<ReferenceType>()) {
if (!ParamRef->getPointeeType()->isFunctionType()) {
// C++ [temp.arg.nontype]p5b3:
// For a non-type template-parameter of type reference to
// object, no conversions apply. The type referred to by the
// reference may be more cv-qualified than the (otherwise
// identical) type of the template-argument. The
// template-parameter is bound directly to the
// template-argument, which shall be an lvalue.
// FIXME: Other qualifiers?
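// For example (an illustrative sketch):
//   const int c = 0;
//   template<int &R> struct X {};
//   X<c> x; // error: binding 'int&' to 'const int' would ignore 'const'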
unsigned ParamQuals = ParamRef->getPointeeType().getCVRQualifiers();
unsigned ArgQuals = ArgType.getCVRQualifiers();
if ((ParamQuals | ArgQuals) != ParamQuals) {
S.Diag(Arg->getLocStart(),
diag::err_template_arg_ref_bind_ignores_quals)
<< ParamType << Arg->getType() << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
}
}
// At this point, the template argument refers to an object or
// function with external linkage. We now need to check whether the
// argument and parameter types are compatible.
if (!S.Context.hasSameUnqualifiedType(ArgType,
ParamType.getNonReferenceType())) {
// We can't perform this conversion or binding.
if (ParamType->isReferenceType())
S.Diag(Arg->getLocStart(), diag::err_template_arg_no_ref_bind)
<< ParamType << ArgIn->getType() << Arg->getSourceRange();
else
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
<< ArgIn->getType() << ParamType << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
}
return false;
}
/// Checks whether the given template argument is the address
/// of an object or function according to C++ [temp.arg.nontype]p1.
static bool
CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
NonTypeTemplateParmDecl *Param,
QualType ParamType,
Expr *ArgIn,
TemplateArgument &Converted) {
bool Invalid = false;
Expr *Arg = ArgIn;
QualType ArgType = Arg->getType();
bool AddressTaken = false;
SourceLocation AddrOpLoc;
if (S.getLangOpts().MicrosoftExt) {
// Microsoft Visual C++ strips all casts, allows an arbitrary number of
// dereference and address-of operators.
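// For example (an illustrative sketch): given 'int obj;' and a reference
// parameter 'template<int &R> struct Y {};', this mode accepts 'Y<*&obj>'
// as if it were written 'Y<obj>'.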
Arg = Arg->IgnoreParenCasts();
bool ExtWarnMSTemplateArg = false;
UnaryOperatorKind FirstOpKind;
SourceLocation FirstOpLoc;
while (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
UnaryOperatorKind UnOpKind = UnOp->getOpcode();
if (UnOpKind == UO_Deref)
ExtWarnMSTemplateArg = true;
if (UnOpKind == UO_AddrOf || UnOpKind == UO_Deref) {
Arg = UnOp->getSubExpr()->IgnoreParenCasts();
if (!AddrOpLoc.isValid()) {
FirstOpKind = UnOpKind;
FirstOpLoc = UnOp->getOperatorLoc();
}
} else
break;
}
if (FirstOpLoc.isValid()) {
if (ExtWarnMSTemplateArg)
S.Diag(ArgIn->getLocStart(), diag::ext_ms_deref_template_argument)
<< ArgIn->getSourceRange();
if (FirstOpKind == UO_AddrOf)
AddressTaken = true;
else if (Arg->getType()->isPointerType()) {
// We cannot let pointers get dereferenced here; that is obviously not a
// constant expression.
assert(FirstOpKind == UO_Deref);
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
}
}
} else {
// See through any implicit casts we added to fix the type.
Arg = Arg->IgnoreImpCasts();
// C++ [temp.arg.nontype]p1:
//
// A template-argument for a non-type, non-template
// template-parameter shall be one of: [...]
//
// -- the address of an object or function with external
// linkage, including function templates and function
// template-ids but excluding non-static class members,
// expressed as & id-expression where the & is optional if
// the name refers to a function or array, or if the
// corresponding template-parameter is a reference; or
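//
// For example (an illustrative sketch):
//   int obj; void fn();
//   template<int *P> struct X {}; X<&obj> a; // '&' required for an object
//   template<int &R> struct Y {}; Y<obj> b; // '&' omitted: reference
//   template<void (*F)()> struct Z {}; Z<fn> c; // '&' optional: function
//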
// In C++98/03 mode, give an extension warning on any extra parentheses.
// See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
if (!Invalid && !ExtraParens) {
S.Diag(Arg->getLocStart(),
S.getLangOpts().CPlusPlus11
? diag::warn_cxx98_compat_template_arg_extra_parens
: diag::ext_template_arg_extra_parens)
<< Arg->getSourceRange();
ExtraParens = true;
}
Arg = Parens->getSubExpr();
}
while (SubstNonTypeTemplateParmExpr *subst =
dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
Arg = subst->getReplacement()->IgnoreImpCasts();
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
if (UnOp->getOpcode() == UO_AddrOf) {
Arg = UnOp->getSubExpr();
AddressTaken = true;
AddrOpLoc = UnOp->getOperatorLoc();
}
}
while (SubstNonTypeTemplateParmExpr *subst =
dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
Arg = subst->getReplacement()->IgnoreImpCasts();
}
DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
ValueDecl *Entity = DRE ? DRE->getDecl() : nullptr;
// If our parameter has pointer type, check for a null template value.
if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
switch (isNullPointerValueTemplateArgument(S, Param, ParamType, ArgIn,
Entity)) {
case NPV_NullPointer:
S.Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
/*isNullPtr=*/true);
return false;
case NPV_Error:
return true;
case NPV_NotNullPointer:
break;
}
}
// Stop checking the precise nature of the argument if it is value
// dependent; it will be checked when instantiated.
if (Arg->isValueDependent()) {
Converted = TemplateArgument(ArgIn);
return false;
}
if (isa<CXXUuidofExpr>(Arg)) {
if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType,
ArgIn, Arg, ArgType))
return true;
Converted = TemplateArgument(ArgIn);
return false;
}
if (!DRE) {
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
// Cannot refer to non-static data members
if (isa<FieldDecl>(Entity) || isa<IndirectFieldDecl>(Entity)) {
S.Diag(Arg->getLocStart(), diag::err_template_arg_field)
<< Entity << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
// Cannot refer to non-static member functions
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Entity)) {
if (!Method->isStatic()) {
S.Diag(Arg->getLocStart(), diag::err_template_arg_method)
<< Method << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
}
FunctionDecl *Func = dyn_cast<FunctionDecl>(Entity);
VarDecl *Var = dyn_cast<VarDecl>(Entity);
// A non-type template argument must refer to an object or function.
if (!Func && !Var) {
// We found something, but we don't know specifically what it is.
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_object_or_func)
<< Arg->getSourceRange();
S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
return true;
}
// Address / reference template args must have external linkage in C++98.
if (Entity->getFormalLinkage() == InternalLinkage) {
S.Diag(Arg->getLocStart(), S.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_template_arg_object_internal :
diag::ext_template_arg_object_internal)
<< !Func << Entity << Arg->getSourceRange();
S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
<< !Func;
} else if (!Entity->hasLinkage()) {
S.Diag(Arg->getLocStart(), diag::err_template_arg_object_no_linkage)
<< !Func << Entity << Arg->getSourceRange();
S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
<< !Func;
return true;
}
if (Func) {
// If the template parameter has pointer type, the function decays.
if (ParamType->isPointerType() && !AddressTaken)
ArgType = S.Context.getPointerType(Func->getType());
else if (AddressTaken && ParamType->isReferenceType()) {
// If we originally had an address-of operator, but the
// parameter has reference type, complain and (if things look
// like they will work) drop the address-of operator.
if (!S.Context.hasSameUnqualifiedType(Func->getType(),
ParamType.getNonReferenceType())) {
S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
<< ParamType;
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
<< ParamType
<< FixItHint::CreateRemoval(AddrOpLoc);
S.Diag(Param->getLocation(), diag::note_template_param_here);
ArgType = Func->getType();
}
} else {
// A value of reference type is not an object.
if (Var->getType()->isReferenceType()) {
S.Diag(Arg->getLocStart(),
diag::err_template_arg_reference_var)
<< Var->getType() << Arg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
// A template argument must have static storage duration.
if (Var->getTLSKind()) {
S.Diag(Arg->getLocStart(), diag::err_template_arg_thread_local)
<< Arg->getSourceRange();
S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
return true;
}
// If the template parameter has pointer type, we must have taken
// the address of this object.
if (ParamType->isReferenceType()) {
if (AddressTaken) {
// If we originally had an address-of operator, but the
// parameter has reference type, complain and (if things look
// like they will work) drop the address-of operator.
if (!S.Context.hasSameUnqualifiedType(Var->getType(),
ParamType.getNonReferenceType())) {
S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
<< ParamType;
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
<< ParamType
<< FixItHint::CreateRemoval(AddrOpLoc);
S.Diag(Param->getLocation(), diag::note_template_param_here);
ArgType = Var->getType();
}
} else if (!AddressTaken && ParamType->isPointerType()) {
if (Var->getType()->isArrayType()) {
// Array-to-pointer decay.
ArgType = S.Context.getArrayDecayedType(Var->getType());
} else {
// If the template parameter has pointer type but the address of
// this object was not taken, complain and (possibly) recover by
// taking the address of the entity.
ArgType = S.Context.getPointerType(Var->getType());
if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
<< ParamType;
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
<< ParamType
<< FixItHint::CreateInsertion(Arg->getLocStart(), "&");
S.Diag(Param->getLocation(), diag::note_template_param_here);
}
}
}
if (CheckTemplateArgumentIsCompatibleWithParameter(S, Param, ParamType, ArgIn,
Arg, ArgType))
return true;
// Create the template argument.
Converted =
TemplateArgument(cast<ValueDecl>(Entity->getCanonicalDecl()), ParamType);
S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity, false);
return false;
}
/// Checks whether the given template argument is a pointer to
/// member constant according to C++ [temp.arg.nontype]p1.
static bool CheckTemplateArgumentPointerToMember(Sema &S,
NonTypeTemplateParmDecl *Param,
QualType ParamType,
Expr *&ResultArg,
TemplateArgument &Converted) {
bool Invalid = false;
Expr *Arg = ResultArg;
bool ObjCLifetimeConversion;
// C++ [temp.arg.nontype]p1:
//
// A template-argument for a non-type, non-template
// template-parameter shall be one of: [...]
//
// -- a pointer to member expressed as described in 5.3.1.
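//
// For example (an illustrative sketch):
//   struct S { int m; };
//   template<int S::*P> struct X {};
//   X<&S::m> x; // '&S::m' is the required pointer-to-member form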
DeclRefExpr *DRE = nullptr;
// In C++98/03 mode, give an extension warning on any extra parentheses.
// See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
bool ExtraParens = false;
while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
if (!Invalid && !ExtraParens) {
S.Diag(Arg->getLocStart(),
S.getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_template_arg_extra_parens :
diag::ext_template_arg_extra_parens)
<< Arg->getSourceRange();
ExtraParens = true;
}
Arg = Parens->getSubExpr();
}
while (SubstNonTypeTemplateParmExpr *subst =
dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
Arg = subst->getReplacement()->IgnoreImpCasts();
// A pointer-to-member constant written &Class::member.
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
if (UnOp->getOpcode() == UO_AddrOf) {
DRE = dyn_cast<DeclRefExpr>(UnOp->getSubExpr());
if (DRE && !DRE->getQualifier())
DRE = nullptr;
}
}
// A constant of pointer-to-member type.
else if ((DRE = dyn_cast<DeclRefExpr>(Arg))) {
ValueDecl *VD = DRE->getDecl();
if (VD->getType()->isMemberPointerType()) {
if (isa<NonTypeTemplateParmDecl>(VD)) {
if (Arg->isTypeDependent() || Arg->isValueDependent()) {
Converted = TemplateArgument(Arg);
} else {
VD = cast<ValueDecl>(VD->getCanonicalDecl());
Converted = TemplateArgument(VD, ParamType);
}
return Invalid;
}
}
DRE = nullptr;
}
ValueDecl *Entity = DRE ? DRE->getDecl() : nullptr;
// Check for a null pointer value.
switch (isNullPointerValueTemplateArgument(S, Param, ParamType, ResultArg,
Entity)) {
case NPV_Error:
return true;
case NPV_NullPointer:
S.Diag(ResultArg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument(S.Context.getCanonicalType(ParamType),
/*isNullPtr*/true);
return false;
case NPV_NotNullPointer:
break;
}
if (S.IsQualificationConversion(ResultArg->getType(),
ParamType.getNonReferenceType(), false,
ObjCLifetimeConversion)) {
ResultArg = S.ImpCastExprToType(ResultArg, ParamType, CK_NoOp,
ResultArg->getValueKind())
.get();
} else if (!S.Context.hasSameUnqualifiedType(
ResultArg->getType(), ParamType.getNonReferenceType())) {
// We can't perform this conversion.
S.Diag(ResultArg->getLocStart(), diag::err_template_arg_not_convertible)
<< ResultArg->getType() << ParamType << ResultArg->getSourceRange();
S.Diag(Param->getLocation(), diag::note_template_param_here);
return true;
}
if (!DRE)
return S.Diag(Arg->getLocStart(),
diag::err_template_arg_not_pointer_to_member_form)
<< Arg->getSourceRange();
if (isa<FieldDecl>(DRE->getDecl()) ||
isa<IndirectFieldDecl>(DRE->getDecl()) ||
isa<CXXMethodDecl>(DRE->getDecl())) {
assert((isa<FieldDecl>(DRE->getDecl()) ||
isa<IndirectFieldDecl>(DRE->getDecl()) ||
!cast<CXXMethodDecl>(DRE->getDecl())->isStatic()) &&
"Only non-static member pointers can make it here");
// Okay: this is the address of a non-static member, and therefore
// a member pointer constant.
if (Arg->isTypeDependent() || Arg->isValueDependent()) {
Converted = TemplateArgument(Arg);
} else {
ValueDecl *D = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
Converted = TemplateArgument(D, ParamType);
}
return Invalid;
}
// We found something else, but we don't know specifically what it is.
S.Diag(Arg->getLocStart(),
diag::err_template_arg_not_pointer_to_member_form)
<< Arg->getSourceRange();
S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
return true;
}
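// Editor's note: a sketch (not from the original diff) of the forms the
// pointer-to-member check above accepts and rejects in pre-C++17 modes:
//
//   struct S { int m; void f(); };
//   template <int S::*P>      struct A {};
//   template <void (S::*P)()> struct B {};
//   A<&S::m>   a;  // ok: written as &Class::member
//   B<&S::f>   b;  // ok: pointer to member function
//   A<nullptr> n;  // ok: null member-pointer value (NPV_NullPointer)
//   int S::*mp = &S::m;
//   A<mp>      e;  // rejected: not of the form &Class::member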
/// Check a template argument against its corresponding
/// non-type template parameter.
///
/// This routine implements the semantics of C++ [temp.arg.nontype].
/// If an error occurred, it returns ExprError(); otherwise, it
/// returns the converted template argument. \p ParamType is the
/// type of the non-type template parameter after it has been instantiated.
ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType ParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK) {
SourceLocation StartLoc = Arg->getLocStart();
// If the parameter type somehow involves auto, deduce the type now.
if (getLangOpts().CPlusPlus17 && ParamType->isUndeducedType()) {
// During template argument deduction, we allow 'decltype(auto)' to
// match an arbitrary dependent argument.
// FIXME: The language rules don't say what happens in this case.
// FIXME: We get an opaque dependent type out of decltype(auto) if the
// expression is merely instantiation-dependent; is this enough?
if (CTAK == CTAK_Deduced && Arg->isTypeDependent()) {
auto *AT = dyn_cast<AutoType>(ParamType);
if (AT && AT->isDecltypeAuto()) {
Converted = TemplateArgument(Arg);
return Arg;
}
}

// When checking a deduced template argument, deduce from its type even if
// the type is dependent, in order to check that the types of non-type
// template arguments line up properly in partial ordering.
Optional<unsigned> Depth;
if (CTAK != CTAK_Specified)
Depth = Param->getDepth() + 1;
if (DeduceAutoType(
Context.getTrivialTypeSourceInfo(ParamType, Param->getLocation()),
Arg, ParamType, Depth) == DAR_Failed) {
Diag(Arg->getExprLoc(),
diag::err_non_type_template_parm_type_deduction_failure)
<< Param->getDeclName() << Param->getType() << Arg->getType()
<< Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
}
// CheckNonTypeTemplateParameterType will produce a diagnostic if there's
// an error. The error message normally references the parameter
// declaration, but here we'll pass the argument location because that's
// where the parameter type is deduced.
ParamType = CheckNonTypeTemplateParameterType(ParamType, Arg->getExprLoc());
if (ParamType.isNull()) {
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
}
}
// We should have already dropped all cv-qualifiers by now.
assert(!ParamType.hasQualifiers() &&
"non-type template parameter type cannot be qualified");
if (CTAK == CTAK_Deduced &&
!Context.hasSameType(ParamType.getNonLValueExprType(Context),
Arg->getType())) {
// FIXME: If either type is dependent, we skip the check. This isn't
// correct, since during deduction we're supposed to have replaced each
// template parameter with some unique (non-dependent) placeholder.
// FIXME: If the argument type contains 'auto', we carry on and fail the
// type check in order to force specific types to be more specialized than
// 'auto'. It's not clear how partial ordering with 'auto' is supposed to
// work.
if ((ParamType->isDependentType() || Arg->isTypeDependent()) &&
!Arg->getType()->getContainedAutoType()) {
Converted = TemplateArgument(Arg);
return Arg;
}
// FIXME: This attempts to implement C++ [temp.deduct.type]p17. Per DR1770,
// we should actually be checking the type of the template argument in P,
// not the type of the template argument deduced from A, against the
// template parameter type.
Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
<< Arg->getType()
<< ParamType.getUnqualifiedType();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
}
// If either the parameter has a dependent type or the argument is
// type-dependent, there's nothing we can check now.
if (ParamType->isDependentType() || Arg->isTypeDependent()) {
// FIXME: Produce a cloned, canonical expression?
Converted = TemplateArgument(Arg);
return Arg;
}
// The initialization of the parameter from the argument is
// a constant-evaluated context.
EnterExpressionEvaluationContext ConstantEvaluated(
*this, Sema::ExpressionEvaluationContext::ConstantEvaluated);
if (getLangOpts().CPlusPlus17) {
// C++17 [temp.arg.nontype]p1:
// A template-argument for a non-type template parameter shall be
// a converted constant expression of the type of the template-parameter.
APValue Value;
ExprResult ArgResult = CheckConvertedConstantExpression(
Arg, ParamType, Value, CCEK_TemplateArg);
if (ArgResult.isInvalid())
return ExprError();
// For a value-dependent argument, CheckConvertedConstantExpression is
// permitted (and expected) to be unable to determine a value.
if (ArgResult.get()->isValueDependent()) {
Converted = TemplateArgument(ArgResult.get());
return ArgResult;
}
QualType CanonParamType = Context.getCanonicalType(ParamType);
// Convert the APValue to a TemplateArgument.
switch (Value.getKind()) {
case APValue::Uninitialized:
assert(ParamType->isNullPtrType());
Converted = TemplateArgument(CanonParamType, /*isNullPtr*/true);
break;
case APValue::Int:
assert(ParamType->isIntegralOrEnumerationType());
Converted = TemplateArgument(Context, Value.getInt(), CanonParamType);
break;
case APValue::MemberPointer: {
assert(ParamType->isMemberPointerType());
// FIXME: We need TemplateArgument representation and mangling for these.
if (!Value.getMemberPointerPath().empty()) {
Diag(Arg->getLocStart(),
diag::err_template_arg_member_ptr_base_derived_not_supported)
<< Value.getMemberPointerDecl() << ParamType
<< Arg->getSourceRange();
return ExprError();
}
auto *VD = const_cast<ValueDecl*>(Value.getMemberPointerDecl());
Converted = VD ? TemplateArgument(VD, CanonParamType)
: TemplateArgument(CanonParamType, /*isNullPtr*/true);
break;
}
case APValue::LValue: {
// For a non-type template-parameter of pointer or reference type,
// the value of the constant expression shall not refer to
assert(ParamType->isPointerType() || ParamType->isReferenceType() ||
ParamType->isNullPtrType());
// -- a temporary object
// -- a string literal
// -- the result of a typeid expression, or
// -- a predefined __func__ variable
if (auto *E = Value.getLValueBase().dyn_cast<const Expr*>()) {
if (isa<CXXUuidofExpr>(E)) {
Converted = TemplateArgument(ArgResult.get());
break;
}
Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
return ExprError();
}
auto *VD = const_cast<ValueDecl *>(
Value.getLValueBase().dyn_cast<const ValueDecl *>());
// -- a subobject
if (Value.hasLValuePath() && Value.getLValuePath().size() == 1 &&
VD && VD->getType()->isArrayType() &&
Value.getLValuePath()[0].ArrayIndex == 0 &&
!Value.isLValueOnePastTheEnd() && ParamType->isPointerType()) {
// Per defect report (no number yet):
// ... other than a pointer to the first element of a complete array
// object.
} else if (!Value.hasLValuePath() || Value.getLValuePath().size() ||
Value.isLValueOnePastTheEnd()) {
Diag(StartLoc, diag::err_non_type_template_arg_subobject)
<< Value.getAsString(Context, ParamType);
return ExprError();
}
assert((VD || !ParamType->isReferenceType()) &&
"null reference should not be a constant expression");
assert((!VD || !ParamType->isNullPtrType()) &&
"non-null value of type nullptr_t?");
Converted = VD ? TemplateArgument(VD, CanonParamType)
: TemplateArgument(CanonParamType, /*isNullPtr*/true);
break;
}
case APValue::AddrLabelDiff:
return Diag(StartLoc, diag::err_non_type_template_arg_addr_label_diff);
case APValue::Float:
case APValue::ComplexInt:
case APValue::ComplexFloat:
case APValue::Vector:
case APValue::Array:
case APValue::Struct:
case APValue::Union:
llvm_unreachable("invalid kind for template argument");
}
return ArgResult.get();
}
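// Editor's note: a sketch (not from the original sources) of how the C++17
// converted-constant-expression path above maps values to arguments:
//
//   constexpr int arr[3] = {1, 2, 3};
//   template <const int *P> struct X {};
//   X<arr>     x1;  // ok: pointer to the first element of a complete array
//   X<&arr[1]> x2;  // rejected: refers to a subobject
//   X<nullptr> x3;  // ok: becomes a null-pointer template argument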
// C++ [temp.arg.nontype]p5:
// The following conversions are performed on each expression used
// as a non-type template-argument. If a non-type
// template-argument cannot be converted to the type of the
// corresponding template-parameter then the program is
// ill-formed.
if (ParamType->isIntegralOrEnumerationType()) {
// C++11:
// -- for a non-type template-parameter of integral or
// enumeration type, conversions permitted in a converted
// constant expression are applied.
//
// C++98:
// -- for a non-type template-parameter of integral or
// enumeration type, integral promotions (4.5) and integral
// conversions (4.7) are applied.
if (getLangOpts().CPlusPlus11) {
// C++ [temp.arg.nontype]p1:
// A template-argument for a non-type, non-template template-parameter
// shall be one of:
//
// -- for a non-type template-parameter of integral or enumeration
// type, a converted constant expression of the type of the
// template-parameter; or
llvm::APSInt Value;
ExprResult ArgResult =
CheckConvertedConstantExpression(Arg, ParamType, Value,
CCEK_TemplateArg);
if (ArgResult.isInvalid())
return ExprError();
// We can't check arbitrary value-dependent arguments.
if (ArgResult.get()->isValueDependent()) {
Converted = TemplateArgument(ArgResult.get());
return ArgResult;
}
// Widen the argument value to sizeof(parameter type). This is almost
// always a no-op, except when the parameter type is bool. In
// that case, this may extend the argument from 1 bit to 8 bits.
QualType IntegerType = ParamType;
if (const EnumType *Enum = IntegerType->getAs<EnumType>())
IntegerType = Enum->getDecl()->getIntegerType();
Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));
Converted = TemplateArgument(Context, Value,
Context.getCanonicalType(ParamType));
return ArgResult;
}
ExprResult ArgResult = DefaultLvalueConversion(Arg);
if (ArgResult.isInvalid())
return ExprError();
Arg = ArgResult.get();
QualType ArgType = Arg->getType();
// C++ [temp.arg.nontype]p1:
// A template-argument for a non-type, non-template
// template-parameter shall be one of:
//
// -- an integral constant-expression of integral or enumeration
// type; or
// -- the name of a non-type template-parameter; or
llvm::APSInt Value;
if (!ArgType->isIntegralOrEnumerationType()) {
Diag(Arg->getLocStart(),
diag::err_template_arg_not_integral_or_enumeral)
<< ArgType << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
} else if (!Arg->isValueDependent()) {
class TmplArgICEDiagnoser : public VerifyICEDiagnoser {
QualType T;
public:
TmplArgICEDiagnoser(QualType T) : T(T) { }
void diagnoseNotICE(Sema &S, SourceLocation Loc,
SourceRange SR) override {
S.Diag(Loc, diag::err_template_arg_not_ice) << T << SR;
}
} Diagnoser(ArgType);
Arg = VerifyIntegerConstantExpression(Arg, &Value, Diagnoser,
false).get();
if (!Arg)
return ExprError();
}
// From here on out, all we care about is the unqualified form
// of the argument type.
ArgType = ArgType.getUnqualifiedType();
// Try to convert the argument to the parameter's type.
if (Context.hasSameType(ParamType, ArgType)) {
// Okay: no conversion necessary
} else if (ParamType->isBooleanType()) {
// This is an integral-to-boolean conversion.
Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralToBoolean).get();
} else if (IsIntegralPromotion(Arg, ArgType, ParamType) ||
!ParamType->isEnumeralType()) {
// This is an integral promotion or conversion.
Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralCast).get();
} else {
// We can't perform this conversion.
Diag(Arg->getLocStart(),
diag::err_template_arg_not_convertible)
<< Arg->getType() << ParamType << Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
}
// Add the value of this argument to the list of converted
// arguments. We use the bitwidth and signedness of the template
// parameter.
if (Arg->isValueDependent()) {
// The argument is value-dependent. Create a new
// TemplateArgument with the converted expression.
Converted = TemplateArgument(Arg);
return Arg;
}
QualType IntegerType = Context.getCanonicalType(ParamType);
if (const EnumType *Enum = IntegerType->getAs<EnumType>())
IntegerType = Context.getCanonicalType(Enum->getDecl()->getIntegerType());
if (ParamType->isBooleanType()) {
// Value must be zero or one.
Value = Value != 0;
unsigned AllowedBits = Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
Value = Value.extOrTrunc(AllowedBits);
Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
} else {
llvm::APSInt OldValue = Value;
// Coerce the template argument's value to the value it will have
// based on the template parameter's type.
unsigned AllowedBits = Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
Value = Value.extOrTrunc(AllowedBits);
Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
// Complain if an unsigned parameter received a negative value.
if (IntegerType->isUnsignedIntegerOrEnumerationType()
&& (OldValue.isSigned() && OldValue.isNegative())) {
Diag(Arg->getLocStart(), diag::warn_template_arg_negative)
<< OldValue.toString(10) << Value.toString(10) << Param->getType()
<< Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
}
// Complain if we overflowed the template parameter's type.
unsigned RequiredBits;
if (IntegerType->isUnsignedIntegerOrEnumerationType())
RequiredBits = OldValue.getActiveBits();
else if (OldValue.isUnsigned())
RequiredBits = OldValue.getActiveBits() + 1;
else
RequiredBits = OldValue.getMinSignedBits();
if (RequiredBits > AllowedBits) {
Diag(Arg->getLocStart(),
diag::warn_template_arg_too_large)
<< OldValue.toString(10) << Value.toString(10) << Param->getType()
<< Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
}
}
Converted = TemplateArgument(Context, Value,
ParamType->isEnumeralType()
? Context.getCanonicalType(ParamType)
: IntegerType);
return Arg;
}
QualType ArgType = Arg->getType();
DeclAccessPair FoundResult; // temporary for ResolveOverloadedFunction
// Handle pointer-to-function, reference-to-function, and
// pointer-to-member-function all in (roughly) the same way.
if (// -- For a non-type template-parameter of type pointer to
// function, only the function-to-pointer conversion (4.3) is
// applied. If the template-argument represents a set of
// overloaded functions (or a pointer to such), the matching
// function is selected from the set (13.4).
(ParamType->isPointerType() &&
ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType()) ||
// -- For a non-type template-parameter of type reference to
// function, no conversions apply. If the template-argument
// represents a set of overloaded functions, the matching
// function is selected from the set (13.4).
(ParamType->isReferenceType() &&
ParamType->getAs<ReferenceType>()->getPointeeType()->isFunctionType()) ||
// -- For a non-type template-parameter of type pointer to
// member function, no conversions apply. If the
// template-argument represents a set of overloaded member
// functions, the matching member function is selected from
// the set (13.4).
(ParamType->isMemberPointerType() &&
ParamType->getAs<MemberPointerType>()->getPointeeType()
->isFunctionType())) {
if (Arg->getType() == Context.OverloadTy) {
if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
true,
FoundResult)) {
if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
return ExprError();
Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
ArgType = Arg->getType();
} else
return ExprError();
}
if (!ParamType->isMemberPointerType()) {
if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
ParamType,
Arg, Converted))
return ExprError();
return Arg;
}
if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
Converted))
return ExprError();
return Arg;
}
if (ParamType->isPointerType()) {
// -- for a non-type template-parameter of type pointer to
// object, qualification conversions (4.4) and the
// array-to-pointer conversion (4.2) are applied.
// C++0x also allows a value of std::nullptr_t.
assert(ParamType->getPointeeType()->isIncompleteOrObjectType() &&
"Only object pointers allowed here");
if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
ParamType,
Arg, Converted))
return ExprError();
return Arg;
}
if (const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>()) {
// -- For a non-type template-parameter of type reference to
// object, no conversions apply. The type referred to by the
// reference may be more cv-qualified than the (otherwise
// identical) type of the template-argument. The
// template-parameter is bound directly to the
// template-argument, which must be an lvalue.
assert(ParamRefType->getPointeeType()->isIncompleteOrObjectType() &&
"Only object references allowed here");
if (Arg->getType() == Context.OverloadTy) {
if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg,
ParamRefType->getPointeeType(),
true,
FoundResult)) {
if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
return ExprError();
Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
ArgType = Arg->getType();
} else
return ExprError();
}
if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
ParamType,
Arg, Converted))
return ExprError();
return Arg;
}
// Deal with parameters of type std::nullptr_t.
if (ParamType->isNullPtrType()) {
if (Arg->isTypeDependent() || Arg->isValueDependent()) {
Converted = TemplateArgument(Arg);
return Arg;
}
switch (isNullPointerValueTemplateArgument(*this, Param, ParamType, Arg)) {
case NPV_NotNullPointer:
Diag(Arg->getExprLoc(), diag::err_template_arg_not_convertible)
<< Arg->getType() << ParamType;
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
case NPV_Error:
return ExprError();
case NPV_NullPointer:
Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument(Context.getCanonicalType(ParamType),
/*isNullPtr*/true);
return Arg;
}
}
// -- For a non-type template-parameter of type pointer to data
// member, qualification conversions (4.4) are applied.
assert(ParamType->isMemberPointerType() && "Only pointers to members remain");
if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
Converted))
return ExprError();
return Arg;
}
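// Editor's note: an illustrative sketch (not part of the original diff) of
// the integral coercions above in C++98 mode, where no converted constant
// expression is formed:
//
//   template <unsigned char N> struct C {};
//   C<256> c1;  // warn_template_arg_too_large: value becomes 0
//   C<-1>  c2;  // warn_template_arg_negative: value becomes 255
//   template <bool B> struct D {};
//   D<2>   d;   // coerced to 'true' via 'Value = Value != 0'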
static void DiagnoseTemplateParameterListArityMismatch(
Sema &S, TemplateParameterList *New, TemplateParameterList *Old,
Sema::TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc);
/// Check a template argument against its corresponding
/// template template parameter.
///
/// This routine implements the semantics of C++ [temp.arg.template].
/// It returns true if an error occurred, and false otherwise.
bool Sema::CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg) {
TemplateName Name = Arg.getArgument().getAsTemplateOrTemplatePattern();
TemplateDecl *Template = Name.getAsTemplateDecl();
if (!Template) {
// Any dependent template name is fine.
assert(Name.isDependent() && "Non-dependent template isn't a declaration?");
return false;
}
if (Template->isInvalidDecl())
return true;
// C++0x [temp.arg.template]p1:
// A template-argument for a template template-parameter shall be
// the name of a class template or an alias template, expressed as an
// id-expression. When the template-argument names a class template, only
// primary class templates are considered when matching the
// template template argument with the corresponding parameter;
// partial specializations are not considered even if their
// parameter lists match that of the template template parameter.
//
// Note that we also allow template template parameters here, which
// will happen when we are dealing with, e.g., class template
// partial specializations.
if (!isa<ClassTemplateDecl>(Template) &&
!isa<TemplateTemplateParmDecl>(Template) &&
!isa<TypeAliasTemplateDecl>(Template) &&
!isa<BuiltinTemplateDecl>(Template)) {
assert(isa<FunctionTemplateDecl>(Template) &&
"Only function templates are possible here");
Diag(Arg.getLocation(), diag::err_template_arg_not_valid_template);
Diag(Template->getLocation(), diag::note_template_arg_refers_here_func)
<< Template;
}
// C++1z [temp.arg.template]p3: (DR 150)
// A template-argument matches a template template-parameter P when P
// is at least as specialized as the template-argument A.
if (getLangOpts().RelaxedTemplateTemplateArgs) {
// Quick check for the common case:
// If P contains a parameter pack, then A [...] matches P if each of A's
// template parameters matches the corresponding template parameter in
// the template-parameter-list of P.
if (TemplateParameterListsAreEqual(
Template->getTemplateParameters(), Params, false,
TPL_TemplateTemplateArgumentMatch, Arg.getLocation()))
return false;
if (isTemplateTemplateParameterAtLeastAsSpecializedAs(Params, Template,
Arg.getLocation()))
return false;
// FIXME: Produce better diagnostics for deduction failures.
}
return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
Params,
true,
TPL_TemplateTemplateArgumentMatch,
Arg.getLocation());
}
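// Editor's note: a sketch (assumption, not from the original sources) of the
// template template argument matching implemented above:
//
//   template <template <typename> class TT> struct Holder {};
//   template <typename T>                   class One {};
//   template <typename T, typename U = int> class Two {};
//   Holder<One> h1;  // ok: parameter lists match exactly
//   Holder<Two> h2;  // ok only under the relaxed (P0522R0) rules checked by
//                    // isTemplateTemplateParameterAtLeastAsSpecializedAs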
/// Given a non-type template argument that refers to a
/// declaration and the type of its corresponding non-type template
/// parameter, produce an expression that properly refers to that
/// declaration.
ExprResult
Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc) {
// C++ [temp.param]p8:
//
// A non-type template-parameter of type "array of T" or
// "function returning T" is adjusted to be of type "pointer to
// T" or "pointer to function returning T", respectively.
if (ParamType->isArrayType())
ParamType = Context.getArrayDecayedType(ParamType);
else if (ParamType->isFunctionType())
ParamType = Context.getPointerType(ParamType);
// For a NULL non-type template argument, return a nullptr literal cast to
// the parameter's type.
if (Arg.getKind() == TemplateArgument::NullPtr) {
return ImpCastExprToType(
new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc),
ParamType,
ParamType->getAs<MemberPointerType>()
? CK_NullToMemberPointer
: CK_NullToPointer);
}
assert(Arg.getKind() == TemplateArgument::Declaration &&
"Only declaration template arguments permitted here");
ValueDecl *VD = Arg.getAsDecl();
if (VD->getDeclContext()->isRecord() &&
(isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD) ||
isa<IndirectFieldDecl>(VD))) {
// If the value is a class member, we might have a pointer-to-member.
// Determine whether the non-type template parameter is of
// pointer-to-member type. If so, we need to build an appropriate
// expression for a pointer-to-member, since a "normal" DeclRefExpr
// would refer to the member itself.
if (ParamType->isMemberPointerType()) {
QualType ClassType
= Context.getTypeDeclType(cast<RecordDecl>(VD->getDeclContext()));
NestedNameSpecifier *Qualifier
= NestedNameSpecifier::Create(Context, nullptr, false,
ClassType.getTypePtr());
CXXScopeSpec SS;
SS.MakeTrivial(Context, Qualifier, Loc);
// The actual value-ness of this is unimportant, but for
// internal consistency's sake, references to instance methods
// are r-values.
ExprValueKind VK = VK_LValue;
if (isa<CXXMethodDecl>(VD) && cast<CXXMethodDecl>(VD)->isInstance())
VK = VK_RValue;
ExprResult RefExpr = BuildDeclRefExpr(VD,
VD->getType().getNonReferenceType(),
VK,
Loc,
&SS);
if (RefExpr.isInvalid())
return ExprError();
RefExpr = CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
// We might need to perform a trailing qualification conversion, since
// the element type on the parameter could be more qualified than the
// element type in the expression we constructed.
bool ObjCLifetimeConversion;
if (IsQualificationConversion(((Expr*) RefExpr.get())->getType(),
ParamType.getUnqualifiedType(), false,
ObjCLifetimeConversion))
RefExpr = ImpCastExprToType(RefExpr.get(), ParamType.getUnqualifiedType(), CK_NoOp);
assert(!RefExpr.isInvalid() &&
Context.hasSameType(((Expr*) RefExpr.get())->getType(),
ParamType.getUnqualifiedType()));
return RefExpr;
}
}
QualType T = VD->getType().getNonReferenceType();
if (ParamType->isPointerType()) {
// When the non-type template parameter is a pointer, take the
// address of the declaration.
ExprResult RefExpr = BuildDeclRefExpr(VD, T, VK_LValue, Loc);
if (RefExpr.isInvalid())
return ExprError();
if (!Context.hasSameUnqualifiedType(ParamType->getPointeeType(), T) &&
(T->isFunctionType() || T->isArrayType())) {
// Decay functions and arrays unless we're forming a pointer to array.
RefExpr = DefaultFunctionArrayConversion(RefExpr.get());
if (RefExpr.isInvalid())
return ExprError();
return RefExpr;
}
// Take the address of everything else
return CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
}
ExprValueKind VK = VK_RValue;
// If the non-type template parameter has reference type, qualify the
// resulting declaration reference with the extra qualifiers on the
// type that the reference refers to.
if (const ReferenceType *TargetRef = ParamType->getAs<ReferenceType>()) {
VK = VK_LValue;
T = Context.getQualifiedType(T,
TargetRef->getPointeeType().getQualifiers());
} else if (isa<FunctionDecl>(VD)) {
// References to functions are always lvalues.
VK = VK_LValue;
}
return BuildDeclRefExpr(VD, T, VK, Loc);
}
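// Editor's note: a sketch (not from the original diff) of the expressions
// rebuilt by BuildExpressionFromDeclTemplateArgument above:
//
//   int g;
//   struct S { int m; };
//   template <int *Q>    struct B {};  // B<&g>: rebuilt as '&g', the
//                                      // address of a DeclRefExpr
//   template <int S::*P> struct A {};  // A<&S::m>: rebuilt as '&S::m' with
//                                      // a trivial scope specifier for 'S'
//   template <int &R>    struct C {};  // C<g>: a direct DeclRefExpr of
//                                      // type 'int', an lvalue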
/// Construct a new expression that refers to the given
/// integral template argument with the given source-location
/// information.
///
/// This routine takes care of the mapping from an integral template
/// argument (which may have any integral type) to the appropriate
/// literal value.
ExprResult
Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc) {
assert(Arg.getKind() == TemplateArgument::Integral &&
"Operation is only valid for integral template arguments");
QualType OrigT = Arg.getIntegralType();
// If this is an enum type that we're instantiating, we need to use an integer
// type the same size as the enumerator. We don't want to build an
// IntegerLiteral with enum type. Since C++11, an enum's underlying type can
// be any integral type, so make sure we create the right kind of literal
// for it.
QualType T = OrigT;
if (const EnumType *ET = OrigT->getAs<EnumType>())
T = ET->getDecl()->getIntegerType();
Expr *E;
if (T->isAnyCharacterType()) {
CharacterLiteral::CharacterKind Kind;
if (T->isWideCharType())
Kind = CharacterLiteral::Wide;
else if (T->isChar8Type() && getLangOpts().Char8)
Kind = CharacterLiteral::UTF8;
else if (T->isChar16Type())
Kind = CharacterLiteral::UTF16;
else if (T->isChar32Type())
Kind = CharacterLiteral::UTF32;
else
Kind = CharacterLiteral::Ascii;
E = new (Context) CharacterLiteral(Arg.getAsIntegral().getZExtValue(),
Kind, T, Loc);
} else if (T->isBooleanType()) {
E = new (Context) CXXBoolLiteralExpr(Arg.getAsIntegral().getBoolValue(),
T, Loc);
} else if (T->isNullPtrType()) {
E = new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc);
} else {
E = IntegerLiteral::Create(Context, Arg.getAsIntegral(), T, Loc);
}
if (OrigT->isEnumeralType()) {
// FIXME: This is a hack. We need a better way to handle substituted
// non-type template parameters.
E = CStyleCastExpr::Create(Context, OrigT, VK_RValue, CK_IntegralCast, E,
nullptr,
Context.getTrivialTypeSourceInfo(OrigT, Loc),
Loc, Loc);
}
return E;
}
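// Editor's note: an illustrative sketch (not from the original sources) of
// the literal kinds produced above:
//
//   template <char C>  struct A {};  // CharacterLiteral
//   template <bool B>  struct D {};  // CXXBoolLiteralExpr
//   template <long N>  struct E {};  // IntegerLiteral
//   enum class K : short { k };
//   template <K V>     struct F {};  // IntegerLiteral of 'short', wrapped in
//                                    // a CStyleCastExpr back to 'K'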
/// Match two template parameters within template parameter lists.
static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
bool Complain,
Sema::TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc) {
// Check the actual kind (type, non-type, template).
if (Old->getKind() != New->getKind()) {
if (Complain) {
unsigned NextDiag = diag::err_template_param_different_kind;
if (TemplateArgLoc.isValid()) {
S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
NextDiag = diag::note_template_param_different_kind;
}
S.Diag(New->getLocation(), NextDiag)
<< (Kind != Sema::TPL_TemplateMatch);
S.Diag(Old->getLocation(), diag::note_template_prev_declaration)
<< (Kind != Sema::TPL_TemplateMatch);
}
return false;
}
// Check that both are parameter packs or neither are parameter packs.
// However, if we are matching a template template argument to a
// template template parameter, the template template parameter can have
// a parameter pack where the template template argument does not.
if (Old->isTemplateParameterPack() != New->isTemplateParameterPack() &&
!(Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
Old->isTemplateParameterPack())) {
if (Complain) {
unsigned NextDiag = diag::err_template_parameter_pack_non_pack;
if (TemplateArgLoc.isValid()) {
S.Diag(TemplateArgLoc,
diag::err_template_arg_template_params_mismatch);
NextDiag = diag::note_template_parameter_pack_non_pack;
}
unsigned ParamKind = isa<TemplateTypeParmDecl>(New)? 0
: isa<NonTypeTemplateParmDecl>(New)? 1
: 2;
S.Diag(New->getLocation(), NextDiag)
<< ParamKind << New->isParameterPack();
S.Diag(Old->getLocation(), diag::note_template_parameter_pack_here)
<< ParamKind << Old->isParameterPack();
}
return false;
}
// For non-type template parameters, check the type of the parameter.
if (NonTypeTemplateParmDecl *OldNTTP
= dyn_cast<NonTypeTemplateParmDecl>(Old)) {
NonTypeTemplateParmDecl *NewNTTP = cast<NonTypeTemplateParmDecl>(New);
// If we are matching a template template argument to a template
// template parameter and one of the non-type template parameter types
// is dependent, then we must wait until template instantiation time
// to actually compare the arguments.
if (Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
(OldNTTP->getType()->isDependentType() ||
NewNTTP->getType()->isDependentType()))
return true;
if (!S.Context.hasSameType(OldNTTP->getType(), NewNTTP->getType())) {
if (Complain) {
unsigned NextDiag = diag::err_template_nontype_parm_different_type;
if (TemplateArgLoc.isValid()) {
S.Diag(TemplateArgLoc,
diag::err_template_arg_template_params_mismatch);
NextDiag = diag::note_template_nontype_parm_different_type;
}
S.Diag(NewNTTP->getLocation(), NextDiag)
<< NewNTTP->getType()
<< (Kind != Sema::TPL_TemplateMatch);
S.Diag(OldNTTP->getLocation(),
diag::note_template_nontype_parm_prev_declaration)
<< OldNTTP->getType();
}
return false;
}
return true;
}
// For template template parameters, check the template parameter lists:
// the template parameter lists of the old and new template template
// parameters must agree.
if (TemplateTemplateParmDecl *OldTTP
= dyn_cast<TemplateTemplateParmDecl>(Old)) {
TemplateTemplateParmDecl *NewTTP = cast<TemplateTemplateParmDecl>(New);
return S.TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
OldTTP->getTemplateParameters(),
Complain,
(Kind == Sema::TPL_TemplateMatch
? Sema::TPL_TemplateTemplateParmMatch
: Kind),
TemplateArgLoc);
}
return true;
}
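// Editor's note: a sketch (assumption, not from the original diff) of a
// parameter-kind mismatch rejected above:
//
//   template <template <typename> class TT> struct H {};
//   template <int N> class NT {};
//   H<NT> h;  // err_template_param_different_kind: type parameter in P,
//             // non-type parameter in the argument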
/// Diagnose a known arity mismatch when comparing template argument
/// lists.
static
void DiagnoseTemplateParameterListArityMismatch(Sema &S,
TemplateParameterList *New,
TemplateParameterList *Old,
Sema::TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc) {
unsigned NextDiag = diag::err_template_param_list_different_arity;
if (TemplateArgLoc.isValid()) {
S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
NextDiag = diag::note_template_param_list_different_arity;
}
S.Diag(New->getTemplateLoc(), NextDiag)
<< (New->size() > Old->size())
<< (Kind != Sema::TPL_TemplateMatch)
<< SourceRange(New->getTemplateLoc(), New->getRAngleLoc());
S.Diag(Old->getTemplateLoc(), diag::note_template_prev_declaration)
<< (Kind != Sema::TPL_TemplateMatch)
<< SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
}
/// Determine whether the given template parameter lists are
/// equivalent.
///
/// \param New The new template parameter list, typically written in the
/// source code as part of a new template declaration.
///
/// \param Old The old template parameter list, typically found via
/// name lookup of the template declared with this template parameter
/// list.
///
/// \param Complain If true, this routine will produce a diagnostic if
/// the template parameter lists are not equivalent.
///
/// \param Kind describes how we are to match the template parameter lists.
///
/// \param TemplateArgLoc If this source location is valid, then we
/// are actually checking the template parameter list of a template
/// argument (New) against the template parameter list of its
/// corresponding template template parameter (Old). We produce
/// slightly different diagnostics in this scenario.
///
/// \returns True if the template parameter lists are equal, false
/// otherwise.
bool
Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc) {
if (Old->size() != New->size() && Kind != TPL_TemplateTemplateArgumentMatch) {
if (Complain)
DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
TemplateArgLoc);
return false;
}
// C++0x [temp.arg.template]p3:
// A template-argument matches a template template-parameter (call it P)
// when each of the template parameters in the template-parameter-list of
// the template-argument's corresponding class template or alias template
// (call it A) matches the corresponding template parameter in the
// template-parameter-list of P. [...]
TemplateParameterList::iterator NewParm = New->begin();
TemplateParameterList::iterator NewParmEnd = New->end();
for (TemplateParameterList::iterator OldParm = Old->begin(),
OldParmEnd = Old->end();
OldParm != OldParmEnd; ++OldParm) {
if (Kind != TPL_TemplateTemplateArgumentMatch ||
!(*OldParm)->isTemplateParameterPack()) {
if (NewParm == NewParmEnd) {
if (Complain)
DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
TemplateArgLoc);
return false;
}
if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
Kind, TemplateArgLoc))
return false;
++NewParm;
continue;
}
// C++0x [temp.arg.template]p3:
// [...] When P's template-parameter-list contains a template parameter
// pack (14.5.3), the template parameter pack will match zero or more
// template parameters or template parameter packs in the
// template-parameter-list of A with the same type and form as the
// template parameter pack in P (ignoring whether those template
// parameters are template parameter packs).
for (; NewParm != NewParmEnd; ++NewParm) {
if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
Kind, TemplateArgLoc))
return false;
}
}
// Make sure we exhausted all of the arguments.
if (NewParm != NewParmEnd) {
if (Complain)
DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
TemplateArgLoc);
return false;
}
return true;
}
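// Editor's note: a sketch (not from the original sources) of the
// pack-matching loop above for template template arguments:
//
//   template <template <typename...> class TT> struct H {};
//   template <typename T>             class One {};
//   template <typename T, typename U> class Two {};
//   H<One> a;  // ok: P's pack matches one type parameter in A
//   H<Two> b;  // ok: P's pack matches two type parameters in A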
/// Check whether a template can be declared within this scope.
///
/// If the template declaration is valid in this scope, returns
/// false. Otherwise, issues a diagnostic and returns true.
bool
Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
if (!S)
return false;
// Find the nearest enclosing declaration scope.
while ((S->getFlags() & Scope::DeclScope) == 0 ||
(S->getFlags() & Scope::TemplateParamScope) != 0)
S = S->getParent();
// C++ [temp]p4:
// A template [...] shall not have C linkage.
DeclContext *Ctx = S->getEntity();
if (Ctx && Ctx->isExternCContext()) {
Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
<< TemplateParams->getSourceRange();
if (const LinkageSpecDecl *LSD = Ctx->getExternCContext())
Diag(LSD->getExternLoc(), diag::note_extern_c_begins_here);
return true;
}
Ctx = Ctx->getRedeclContext();
// C++ [temp]p2:
// A template-declaration can appear only as a namespace scope or
// class scope declaration.
if (Ctx) {
if (Ctx->isFileContext())
return false;
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Ctx)) {
// C++ [temp.mem]p2:
// A local class shall not have member templates.
if (RD->isLocalClass())
return Diag(TemplateParams->getTemplateLoc(),
diag::err_template_inside_local_class)
<< TemplateParams->getSourceRange();
else
return false;
}
}
return Diag(TemplateParams->getTemplateLoc(),
diag::err_template_outside_namespace_or_class_scope)
<< TemplateParams->getSourceRange();
}
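// Editor's note: an illustrative sketch (not part of the original diff) of
// the scope restrictions enforced above:
//
//   extern "C" {
//     template <typename T> void f(T);  // err_template_linkage
//   }
//   void g() {
//     struct Local {
//       template <typename T> void m(); // err_template_inside_local_class
//     };
//   }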
/// Determine what kind of template specialization the given declaration
/// is.
static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D) {
if (!D)
return TSK_Undeclared;
if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D))
return Record->getTemplateSpecializationKind();
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
return Function->getTemplateSpecializationKind();
if (VarDecl *Var = dyn_cast<VarDecl>(D))
return Var->getTemplateSpecializationKind();
return TSK_Undeclared;
}
/// Check whether a specialization is well-formed in the current
/// context.
///
/// This routine determines whether a template specialization can be declared
/// in the current context (C++ [temp.expl.spec]p2).
///
/// \param S the semantic analysis object for which this check is being
/// performed.
///
/// \param Specialized the entity being specialized or instantiated, which
/// may be a kind of template (class template, function template, etc.) or
/// a member of a class template (member function, static data member,
/// member class).
///
/// \param PrevDecl the previous declaration of this entity, if any.
///
/// \param Loc the location of the explicit specialization or instantiation of
/// this entity.
///
/// \param IsPartialSpecialization whether this is a partial specialization of
/// a class template.
///
/// \returns true if there was an error that we cannot recover from, false
/// otherwise.
static bool CheckTemplateSpecializationScope(Sema &S,
NamedDecl *Specialized,
NamedDecl *PrevDecl,
SourceLocation Loc,
bool IsPartialSpecialization) {
// Keep these "kind" numbers in sync with the %select statements in the
// various diagnostics emitted by this routine.
int EntityKind = 0;
if (isa<ClassTemplateDecl>(Specialized))
EntityKind = IsPartialSpecialization? 1 : 0;
else if (isa<VarTemplateDecl>(Specialized))
EntityKind = IsPartialSpecialization ? 3 : 2;
else if (isa<FunctionTemplateDecl>(Specialized))
EntityKind = 4;
else if (isa<CXXMethodDecl>(Specialized))
EntityKind = 5;
else if (isa<VarDecl>(Specialized))
EntityKind = 6;
else if (isa<RecordDecl>(Specialized))
EntityKind = 7;
else if (isa<EnumDecl>(Specialized) && S.getLangOpts().CPlusPlus11)
EntityKind = 8;
else {
S.Diag(Loc, diag::err_template_spec_unknown_kind)
<< S.getLangOpts().CPlusPlus11;
S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
return true;
}
// C++ [temp.expl.spec]p2:
// An explicit specialization may be declared in any scope in which
// the corresponding primary template may be defined.
if (S.CurContext->getRedeclContext()->isFunctionOrMethod()) {
S.Diag(Loc, diag::err_template_spec_decl_function_scope)
<< Specialized;
return true;
}
// C++ [temp.class.spec]p6:
// A class template partial specialization may be declared in any
// scope in which the primary template may be defined.
DeclContext *SpecializedContext =
Specialized->getDeclContext()->getRedeclContext();
DeclContext *DC = S.CurContext->getRedeclContext();
// Make sure that this redeclaration (or definition) occurs in the same
// scope or an enclosing namespace.
if (!(DC->isFileContext() ? DC->Encloses(SpecializedContext)
: DC->Equals(SpecializedContext))) {
if (isa<TranslationUnitDecl>(SpecializedContext))
S.Diag(Loc, diag::err_template_spec_redecl_global_scope)
<< EntityKind << Specialized;
else {
auto *ND = cast<NamedDecl>(SpecializedContext);
int Diag = diag::err_template_spec_redecl_out_of_scope;
if (S.getLangOpts().MicrosoftExt && !DC->isRecord())
Diag = diag::ext_ms_template_spec_redecl_out_of_scope;
S.Diag(Loc, Diag) << EntityKind << Specialized
<< ND << isa<CXXRecordDecl>(ND);
}
S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
// Don't allow specializing in the wrong class during error recovery.
// Otherwise, things can go horribly wrong.
if (DC->isRecord())
return true;
}
return false;
}
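// Editor's note: a sketch (assumption, not from the original sources) of the
// specialization-scope check above:
//
//   namespace N { template <typename T> struct X {}; }
//   template <> struct N::X<int> {};  // ok: an enclosing (file) scope
//   namespace M {
//     template <> struct N::X<char> {};  // err_template_spec_redecl_out_of_scope
//   }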
static SourceRange findTemplateParameterInType(unsigned Depth, Expr *E) {
if (!E->isTypeDependent())
return SourceLocation();
DependencyChecker Checker(Depth, /*IgnoreNonTypeDependent*/true);
Checker.TraverseStmt(E);
if (Checker.MatchLoc.isInvalid())
return E->getSourceRange();
return Checker.MatchLoc;
}
static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) {
if (!TL.getType()->isDependentType())
return SourceLocation();
DependencyChecker Checker(Depth, /*IgnoreNonTypeDependent*/true);
Checker.TraverseTypeLoc(TL);
if (Checker.MatchLoc.isInvalid())
return TL.getSourceRange();
return Checker.MatchLoc;
}
/// Subroutine of Sema::CheckTemplatePartialSpecializationArgs
/// that checks non-type template partial specialization arguments.
static bool CheckNonTypeTemplatePartialSpecializationArgs(
Sema &S, SourceLocation TemplateNameLoc, NonTypeTemplateParmDecl *Param,
const TemplateArgument *Args, unsigned NumArgs, bool IsDefaultArgument) {
for (unsigned I = 0; I != NumArgs; ++I) {
if (Args[I].getKind() == TemplateArgument::Pack) {
if (CheckNonTypeTemplatePartialSpecializationArgs(
S, TemplateNameLoc, Param, Args[I].pack_begin(),
Args[I].pack_size(), IsDefaultArgument))
return true;
continue;
}
if (Args[I].getKind() != TemplateArgument::Expression)
continue;
Expr *ArgExpr = Args[I].getAsExpr();
// We can have a pack expansion of any of the bullets below.
if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(ArgExpr))
ArgExpr = Expansion->getPattern();
// Strip off any implicit casts we added as part of type checking.
while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
ArgExpr = ICE->getSubExpr();
// C++ [temp.class.spec]p8:
// A non-type argument is non-specialized if it is the name of a
// non-type parameter. All other non-type arguments are
// specialized.
//
// Below, we check the two conditions that only apply to
// specialized non-type arguments, so skip any non-specialized
// arguments.
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr))
if (isa<NonTypeTemplateParmDecl>(DRE->getDecl()))
continue;
// C++ [temp.class.spec]p9:
// Within the argument list of a class template partial
// specialization, the following restrictions apply:
// -- A partially specialized non-type argument expression
// shall not involve a template parameter of the partial
// specialization except when the argument expression is a
// simple identifier.
// -- The type of a template parameter corresponding to a
// specialized non-type argument shall not be dependent on a
// parameter of the specialization.
// DR1315 removes the first bullet, leaving an incoherent set of rules.
// We implement a compromise between the original rules and DR1315:
// -- A specialized non-type template argument shall not be
// type-dependent and the corresponding template parameter
// shall have a non-dependent type.
SourceRange ParamUseRange =
findTemplateParameterInType(Param->getDepth(), ArgExpr);
if (ParamUseRange.isValid()) {
if (IsDefaultArgument) {
S.Diag(TemplateNameLoc,
diag::err_dependent_non_type_arg_in_partial_spec);
S.Diag(ParamUseRange.getBegin(),
diag::note_dependent_non_type_default_arg_in_partial_spec)
<< ParamUseRange;
} else {
S.Diag(ParamUseRange.getBegin(),
diag::err_dependent_non_type_arg_in_partial_spec)
<< ParamUseRange;
}
return true;
}
ParamUseRange = findTemplateParameter(
Param->getDepth(), Param->getTypeSourceInfo()->getTypeLoc());
if (ParamUseRange.isValid()) {
S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getLocStart(),
diag::err_dependent_typed_non_type_arg_in_partial_spec)
<< Param->getType();
S.Diag(Param->getLocation(), diag::note_template_param_here)
<< (IsDefaultArgument ? ParamUseRange : SourceRange())
<< ParamUseRange;
return true;
}
}
return false;
}
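// Editor's note: a sketch (not from the original diff) of the compromise
// rule implemented above:
//
//   template <typename T, T V> struct A {};
//   template <typename T> struct A<T, 1> {};
//     // err_dependent_typed_non_type_arg_in_partial_spec: the parameter
//     // matching '1' has the dependent type 'T'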
/// Check the non-type template arguments of a class template
/// partial specialization according to C++ [temp.class.spec]p9.
///
/// \param TemplateNameLoc the location of the template name.
/// \param PrimaryTemplate the primary class template.
/// \param NumExplicit the number of explicitly-specified template arguments.
/// \param TemplateArgs the template arguments of the class template
/// partial specialization.
///
/// \returns \c true if there was an error, \c false otherwise.
bool Sema::CheckTemplatePartialSpecializationArgs(
SourceLocation TemplateNameLoc, TemplateDecl *PrimaryTemplate,
unsigned NumExplicit, ArrayRef<TemplateArgument> TemplateArgs) {
// We have to be conservative when checking a template in a dependent
// context.
if (PrimaryTemplate->getDeclContext()->isDependentContext())
return false;
TemplateParameterList *TemplateParams =
PrimaryTemplate->getTemplateParameters();
for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
NonTypeTemplateParmDecl *Param
= dyn_cast<NonTypeTemplateParmDecl>(TemplateParams->getParam(I));
if (!Param)
continue;
if (CheckNonTypeTemplatePartialSpecializationArgs(*this, TemplateNameLoc,
Param, &TemplateArgs[I],
1, I >= NumExplicit))
return true;
}
return false;
}
DeclResult Sema::ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody) {
assert(TUK != TUK_Reference && "References are not specializations");
CXXScopeSpec &SS = TemplateId.SS;
// NOTE: KWLoc is the location of the tag keyword; TemplateKWLoc below
// instead stores the location of the outermost template keyword in the
// declaration.
SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0
? TemplateParameterLists[0]->getTemplateLoc() : KWLoc;
SourceLocation TemplateNameLoc = TemplateId.TemplateNameLoc;
SourceLocation LAngleLoc = TemplateId.LAngleLoc;
SourceLocation RAngleLoc = TemplateId.RAngleLoc;
// Find the class template we're specializing
TemplateName Name = TemplateId.Template.get();
ClassTemplateDecl *ClassTemplate
= dyn_cast_or_null<ClassTemplateDecl>(Name.getAsTemplateDecl());
if (!ClassTemplate) {
Diag(TemplateNameLoc, diag::err_not_class_template_specialization)
<< (Name.getAsTemplateDecl() &&
isa<TemplateTemplateParmDecl>(Name.getAsTemplateDecl()));
return true;
}
bool isMemberSpecialization = false;
bool isPartialSpecialization = false;
// Check the validity of the template headers that introduce this
// template.
// FIXME: We probably shouldn't complain about these headers for
// friend declarations.
bool Invalid = false;
TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
KWLoc, TemplateNameLoc, SS, &TemplateId,
TemplateParameterLists, TUK == TUK_Friend, isMemberSpecialization,
Invalid);
if (Invalid)
return true;
if (TemplateParams && TemplateParams->size() > 0) {
isPartialSpecialization = true;
if (TUK == TUK_Friend) {
Diag(KWLoc, diag::err_partial_specialization_friend)
<< SourceRange(LAngleLoc, RAngleLoc);
return true;
}
// C++ [temp.class.spec]p10:
// The template parameter list of a specialization shall not
// contain default template argument values.
for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
Decl *Param = TemplateParams->getParam(I);
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
if (TTP->hasDefaultArgument()) {
Diag(TTP->getDefaultArgumentLoc(),
diag::err_default_arg_in_partial_spec);
TTP->removeDefaultArgument();
}
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(Param)) {
if (Expr *DefArg = NTTP->getDefaultArgument()) {
Diag(NTTP->getDefaultArgumentLoc(),
diag::err_default_arg_in_partial_spec)
<< DefArg->getSourceRange();
NTTP->removeDefaultArgument();
}
} else {
TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(Param);
if (TTP->hasDefaultArgument()) {
Diag(TTP->getDefaultArgument().getLocation(),
diag::err_default_arg_in_partial_spec)
<< TTP->getDefaultArgument().getSourceRange();
TTP->removeDefaultArgument();
}
}
}
} else if (TemplateParams) {
if (TUK == TUK_Friend)
Diag(KWLoc, diag::err_template_spec_friend)
<< FixItHint::CreateRemoval(
SourceRange(TemplateParams->getTemplateLoc(),
TemplateParams->getRAngleLoc()))
<< SourceRange(LAngleLoc, RAngleLoc);
} else {
assert(TUK == TUK_Friend && "should have a 'template<>' for this decl");
}
// Check that the specialization uses the same tag kind as the
// original template.
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
assert(Kind != TTK_Enum && "Invalid enum tag in class template spec!");
if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
Kind, TUK == TUK_Definition, KWLoc,
ClassTemplate->getIdentifier())) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< ClassTemplate
<< FixItHint::CreateReplacement(KWLoc,
ClassTemplate->getTemplatedDecl()->getKindName());
Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
diag::note_previous_use);
Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
}
// Translate the parser's template argument list into our AST format.
TemplateArgumentListInfo TemplateArgs =
makeTemplateArgumentListInfo(*this, TemplateId);
// Check for unexpanded parameter packs in any of the template arguments.
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
UPPC_PartialSpecialization))
return true;
// Check that the template argument list is well-formed for this
// template.
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
TemplateArgs, false, Converted))
return true;
// Find the class template (partial) specialization declaration that
// corresponds to these arguments.
if (isPartialSpecialization) {
if (CheckTemplatePartialSpecializationArgs(TemplateNameLoc, ClassTemplate,
TemplateArgs.size(), Converted))
return true;
// FIXME: Move this to CheckTemplatePartialSpecializationArgs so we
// also do it during instantiation.
bool InstantiationDependent;
if (!Name.isDependent() &&
!TemplateSpecializationType::anyDependentTemplateArguments(
TemplateArgs.arguments(), InstantiationDependent)) {
Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
<< ClassTemplate->getDeclName();
isPartialSpecialization = false;
}
}
void *InsertPos = nullptr;
ClassTemplateSpecializationDecl *PrevDecl = nullptr;
if (isPartialSpecialization)
// FIXME: Template parameter list matters, too
PrevDecl = ClassTemplate->findPartialSpecialization(Converted, InsertPos);
else
PrevDecl = ClassTemplate->findSpecialization(Converted, InsertPos);
ClassTemplateSpecializationDecl *Specialization = nullptr;
// Check whether we can declare a class template specialization in
// the current scope.
if (TUK != TUK_Friend &&
CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
TemplateNameLoc,
isPartialSpecialization))
return true;
// The canonical type
QualType CanonType;
if (isPartialSpecialization) {
// Build the canonical type that describes the converted template
// arguments of the class template partial specialization.
TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
CanonType = Context.getTemplateSpecializationType(CanonTemplate,
Converted);
if (Context.hasSameType(CanonType,
ClassTemplate->getInjectedClassNameSpecialization())) {
// C++ [temp.class.spec]p9b3:
//
// -- The argument list of the specialization shall not be identical
// to the implicit argument list of the primary template.
//
// This rule has since been removed, because it's redundant given DR1495,
// but we keep it because it produces better diagnostics and recovery.
Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
<< /*class template*/0 << (TUK == TUK_Definition)
<< FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
ClassTemplate->getIdentifier(),
TemplateNameLoc,
Attr,
TemplateParams,
AS_none, /*ModulePrivateLoc=*/SourceLocation(),
/*FriendLoc*/SourceLocation(),
TemplateParameterLists.size() - 1,
TemplateParameterLists.data());
}
// Create a new class template partial specialization declaration node.
ClassTemplatePartialSpecializationDecl *PrevPartial
= cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
ClassTemplatePartialSpecializationDecl *Partial
= ClassTemplatePartialSpecializationDecl::Create(Context, Kind,
ClassTemplate->getDeclContext(),
KWLoc, TemplateNameLoc,
TemplateParams,
ClassTemplate,
Converted,
TemplateArgs,
CanonType,
PrevPartial);
SetNestedNameSpecifier(Partial, SS);
if (TemplateParameterLists.size() > 1 && SS.isSet()) {
Partial->setTemplateParameterListsInfo(
Context, TemplateParameterLists.drop_back(1));
}
if (!PrevPartial)
ClassTemplate->AddPartialSpecialization(Partial, InsertPos);
Specialization = Partial;
// If we are providing an explicit specialization of a member class
// template specialization, make a note of that.
if (PrevPartial && PrevPartial->getInstantiatedFromMember())
PrevPartial->setMemberSpecialization();
CheckTemplatePartialSpecialization(Partial);
} else {
// Create a new class template specialization declaration node for
// this explicit specialization or friend declaration.
Specialization
= ClassTemplateSpecializationDecl::Create(Context, Kind,
ClassTemplate->getDeclContext(),
KWLoc, TemplateNameLoc,
ClassTemplate,
Converted,
PrevDecl);
SetNestedNameSpecifier(Specialization, SS);
if (TemplateParameterLists.size() > 0) {
Specialization->setTemplateParameterListsInfo(Context,
TemplateParameterLists);
}
if (!PrevDecl)
ClassTemplate->AddSpecialization(Specialization, InsertPos);
if (CurContext->isDependentContext()) {
TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
CanonType = Context.getTemplateSpecializationType(
CanonTemplate, Converted);
} else {
CanonType = Context.getTypeDeclType(Specialization);
}
}
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
// explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
// instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
bool Okay = false;
for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
// Is there any previous explicit specialization declaration?
if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
Okay = true;
break;
}
}
if (!Okay) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
<< Context.getTypeDeclType(Specialization) << Range;
Diag(PrevDecl->getPointOfInstantiation(),
diag::note_instantiation_required_here)
<< (PrevDecl->getTemplateSpecializationKind()
!= TSK_ImplicitInstantiation);
return true;
}
}
// If this is not a friend, note that this is an explicit specialization.
if (TUK != TUK_Friend)
Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
// Check that this isn't a redefinition of this specialization.
if (TUK == TUK_Definition) {
RecordDecl *Def = Specialization->getDefinition();
NamedDecl *Hidden = nullptr;
if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
SkipBody->ShouldSkip = true;
makeMergedDefinitionVisible(Hidden);
// From here on out, treat this as just a redeclaration.
TUK = TUK_Declaration;
} else if (Def) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
Diag(TemplateNameLoc, diag::err_redefinition) << Specialization << Range;
Diag(Def->getLocation(), diag::note_previous_definition);
Specialization->setInvalidDecl();
return true;
}
}
ProcessDeclAttributeList(S, Specialization, Attr);
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
if (TUK == TUK_Definition) {
AddAlignmentAttributesForRecord(Specialization);
AddMsStructLayoutForRecord(Specialization);
}
if (ModulePrivateLoc.isValid())
Diag(Specialization->getLocation(), diag::err_module_private_specialization)
<< (isPartialSpecialization? 1 : 0)
<< FixItHint::CreateRemoval(ModulePrivateLoc);
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
// itself. This means that we'll pretty-print the type retrieved
// from the specialization's declaration the way that the user
// actually wrote the specialization, rather than formatting the
// name based on the "canonical" representation used to store the
// template arguments in the specialization.
TypeSourceInfo *WrittenTy
= Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
TemplateArgs, CanonType);
if (TUK != TUK_Friend) {
Specialization->setTypeAsWritten(WrittenTy);
Specialization->setTemplateKeywordLoc(TemplateKWLoc);
}
// C++ [temp.expl.spec]p9:
// A template explicit specialization is in the scope of the
// namespace in which the template was defined.
//
// We actually implement this paragraph where we set the semantic
// context (in the creation of the ClassTemplateSpecializationDecl),
// but we also maintain the lexical context where the actual
// definition occurs.
Specialization->setLexicalDeclContext(CurContext);
// We may be starting the definition of this specialization.
if (TUK == TUK_Definition)
Specialization->startDefinition();
if (TUK == TUK_Friend) {
FriendDecl *Friend = FriendDecl::Create(Context, CurContext,
TemplateNameLoc,
WrittenTy,
/*FIXME:*/KWLoc);
Friend->setAccess(AS_public);
CurContext->addDecl(Friend);
} else {
// Add the specialization into its lexical context, so that it can
// be seen when iterating through the list of declarations in that
// context. However, specializations are not found by name lookup.
CurContext->addDecl(Specialization);
}
return Specialization;
}
Decl *Sema::ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D) {
Decl *NewDecl = HandleDeclarator(S, D, TemplateParameterLists);
ActOnDocumentableDecl(NewDecl);
return NewDecl;
}
/// Strips various properties off an implicit instantiation
/// that has just been explicitly specialized.
static void StripImplicitInstantiation(NamedDecl *D) {
D->dropAttr<DLLImportAttr>();
D->dropAttr<DLLExportAttr>();
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
FD->setInlineSpecified(false);
}
/// Compute the diagnostic location for an explicit instantiation
/// declaration or definition.
static SourceLocation DiagLocForExplicitInstantiation(
NamedDecl* D, SourceLocation PointOfInstantiation) {
// Explicit instantiations following a specialization have no effect and
// hence no PointOfInstantiation. In that case, walk decl backwards
// until a valid name loc is found.
SourceLocation PrevDiagLoc = PointOfInstantiation;
for (Decl *Prev = D; Prev && !PrevDiagLoc.isValid();
Prev = Prev->getPreviousDecl()) {
PrevDiagLoc = Prev->getLocation();
}
assert(PrevDiagLoc.isValid() &&
"Explicit instantiation without point of instantiation?");
return PrevDiagLoc;
}
/// Diagnose cases where we have an explicit template specialization
/// before/after an explicit template instantiation, producing diagnostics
/// for those cases where they are required and determining whether the
/// new specialization/instantiation will have any effect.
///
/// \param NewLoc the location of the new explicit specialization or
/// instantiation.
///
/// \param NewTSK the kind of the new explicit specialization or instantiation.
///
/// \param PrevDecl the previous declaration of the entity.
///
/// \param PrevTSK the kind of the old explicit specialization or instantiation.
///
/// \param PrevPointOfInstantiation if valid, indicates where the previous
/// declaration was instantiated (either implicitly or explicitly).
///
/// \param HasNoEffect will be set to true to indicate that the new
/// specialization or instantiation has no effect and should be ignored.
///
/// \returns true if there was an error that should prevent the introduction of
/// the new declaration into the AST, false otherwise.
bool
Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPointOfInstantiation,
bool &HasNoEffect) {
HasNoEffect = false;
switch (NewTSK) {
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
assert(
(PrevTSK == TSK_Undeclared || PrevTSK == TSK_ImplicitInstantiation) &&
"previous declaration must be implicit!");
return false;
case TSK_ExplicitSpecialization:
switch (PrevTSK) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
// Okay, we're just specializing something that is either already
// explicitly specialized or has merely been mentioned without any
// instantiation.
return false;
case TSK_ImplicitInstantiation:
if (PrevPointOfInstantiation.isInvalid()) {
// The declaration itself has not actually been instantiated, so it is
// still okay to specialize it.
StripImplicitInstantiation(PrevDecl);
return false;
}
// Fall through
LLVM_FALLTHROUGH;
case TSK_ExplicitInstantiationDeclaration:
case TSK_ExplicitInstantiationDefinition:
assert((PrevTSK == TSK_ImplicitInstantiation ||
PrevPointOfInstantiation.isValid()) &&
"Explicit instantiation without point of instantiation?");
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template
// is explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an
// implicit instantiation to take place, in every translation unit in
// which such a use occurs; no diagnostic is required.
for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
// Is there any previous explicit specialization declaration?
if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization)
return false;
}
Diag(NewLoc, diag::err_specialization_after_instantiation)
<< PrevDecl;
Diag(PrevPointOfInstantiation, diag::note_instantiation_required_here)
<< (PrevTSK != TSK_ImplicitInstantiation);
return true;
}
llvm_unreachable("The switch over PrevTSK must be exhaustive.");
case TSK_ExplicitInstantiationDeclaration:
switch (PrevTSK) {
case TSK_ExplicitInstantiationDeclaration:
// This explicit instantiation declaration is redundant (that's okay).
HasNoEffect = true;
return false;
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
// We're explicitly instantiating something that may have already been
// implicitly instantiated; that's fine.
return false;
case TSK_ExplicitSpecialization:
// C++0x [temp.explicit]p4:
// For a given set of template parameters, if an explicit instantiation
// of a template appears after a declaration of an explicit
// specialization for that template, the explicit instantiation has no
// effect.
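// For example, with invented names:
//
//   template<typename T> void f(T) { }
//   template<> void f(int) { }    // explicit specialization
//   extern template void f(int);  // OK, but has no effect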
HasNoEffect = true;
return false;
case TSK_ExplicitInstantiationDefinition:
// C++0x [temp.explicit]p10:
// If an entity is the subject of both an explicit instantiation
// declaration and an explicit instantiation definition in the same
// translation unit, the definition shall follow the declaration.
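// For example, with invented names:
//
//   template<typename T> void f(T) { }
//   template void f(int);         // explicit instantiation definition
//   extern template void f(int);  // error: declaration after definition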
Diag(NewLoc,
diag::err_explicit_instantiation_declaration_after_definition);
// Explicit instantiations following a specialization have no effect and
// hence no PrevPointOfInstantiation. In that case, walk decl backwards
// until a valid name loc is found.
Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
diag::note_explicit_instantiation_definition_here);
HasNoEffect = true;
return false;
}
case TSK_ExplicitInstantiationDefinition:
switch (PrevTSK) {
case TSK_Undeclared:
case TSK_ImplicitInstantiation:
// We're explicitly instantiating something that may have already been
// implicitly instantiated; that's fine.
return false;
case TSK_ExplicitSpecialization:
// C++ DR 259, C++0x [temp.explicit]p4:
// For a given set of template parameters, if an explicit
// instantiation of a template appears after a declaration of
// an explicit specialization for that template, the explicit
// instantiation has no effect.
Diag(NewLoc, diag::warn_explicit_instantiation_after_specialization)
<< PrevDecl;
Diag(PrevDecl->getLocation(),
diag::note_previous_template_specialization);
HasNoEffect = true;
return false;
case TSK_ExplicitInstantiationDeclaration:
// We're explicitly instantiating a definition for something for which we
// were previously asked to suppress instantiations. That's fine.
// C++0x [temp.explicit]p4:
// For a given set of template parameters, if an explicit instantiation
// of a template appears after a declaration of an explicit
// specialization for that template, the explicit instantiation has no
// effect.
for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
// Is there any previous explicit specialization declaration?
if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
HasNoEffect = true;
break;
}
}
return false;
case TSK_ExplicitInstantiationDefinition:
// C++0x [temp.spec]p5:
// For a given template and a given set of template-arguments,
// - an explicit instantiation definition shall appear at most once
// in a program,
// MSVCCompat: MSVC silently ignores duplicate explicit instantiations.
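// For example, with invented names:
//
//   template<typename T> struct X { };
//   template struct X<int>;
//   template struct X<int>; // error; under -fms-compatibility, a warning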
Diag(NewLoc, (getLangOpts().MSVCCompat)
? diag::ext_explicit_instantiation_duplicate
: diag::err_explicit_instantiation_duplicate)
<< PrevDecl;
Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
diag::note_previous_explicit_instantiation);
HasNoEffect = true;
return false;
}
}
llvm_unreachable("Missing specialization/instantiation case?");
}
/// Perform semantic analysis for the given dependent function
/// template specialization.
///
/// The only possible way to get a dependent function template specialization
/// is with a friend declaration, like so:
///
/// \code
/// template \<class T> void foo(T);
/// template \<class T> class A {
/// friend void foo<>(T);
/// };
/// \endcode
///
/// There really isn't any useful analysis we can do here, so we
/// just store the information.
bool
Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous) {
// Remove anything from Previous that isn't a function template in
// the correct context.
DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
LookupResult::Filter F = Previous.makeFilter();
enum DiscardReason { NotAFunctionTemplate, NotAMemberOfEnclosing };
SmallVector<std::pair<DiscardReason, Decl *>, 8> DiscardedCandidates;
while (F.hasNext()) {
NamedDecl *D = F.next()->getUnderlyingDecl();
if (!isa<FunctionTemplateDecl>(D)) {
F.erase();
DiscardedCandidates.push_back(std::make_pair(NotAFunctionTemplate, D));
continue;
}
if (!FDLookupContext->InEnclosingNamespaceSetOf(
D->getDeclContext()->getRedeclContext())) {
F.erase();
DiscardedCandidates.push_back(std::make_pair(NotAMemberOfEnclosing, D));
continue;
}
}
F.done();
if (Previous.empty()) {
Diag(FD->getLocation(),
diag::err_dependent_function_template_spec_no_match);
for (auto &P : DiscardedCandidates)
Diag(P.second->getLocation(),
diag::note_dependent_function_template_spec_discard_reason)
<< P.first;
return true;
}
FD->setDependentTemplateSpecialization(Context, Previous.asUnresolvedSet(),
ExplicitTemplateArgs);
return false;
}
/// Perform semantic analysis for the given function template
/// specialization.
///
/// This routine performs all of the semantic analysis required for an
/// explicit function template specialization. On successful completion,
/// the function declaration \p FD will become a function template
/// specialization.
///
/// \param FD the function declaration, which will be updated to become a
/// function template specialization.
///
/// \param ExplicitTemplateArgs the explicitly-provided template arguments,
/// if any. Note that this may be valid info even when zero arguments
/// are explicitly provided, as in, e.g., \c void sort<>(char*, char*),
/// since it still carries the locations of the angle brackets.
///
/// \param Previous the set of declarations that may be specialized by
/// this function specialization.
bool Sema::CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous) {
// The set of function template specializations that could match this
// explicit function template specialization.
UnresolvedSet<8> Candidates;
TemplateSpecCandidateSet FailedCandidates(FD->getLocation(),
/*ForTakingAddress=*/false);
llvm::SmallDenseMap<FunctionDecl *, TemplateArgumentListInfo, 8>
ConvertedTemplateArgs;
DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I) {
NamedDecl *Ovl = (*I)->getUnderlyingDecl();
if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Ovl)) {
// Only consider templates found within the same semantic lookup scope as
// FD.
if (!FDLookupContext->InEnclosingNamespaceSetOf(
Ovl->getDeclContext()->getRedeclContext()))
continue;
// When matching a constexpr member function template specialization
// against the primary template, we don't yet know whether the
// specialization has an implicit 'const' (because we don't know whether
// it will be a static member function until we know which template it
// specializes), so adjust it now assuming it specializes this template.
QualType FT = FD->getType();
if (FD->isConstexpr()) {
CXXMethodDecl *OldMD =
dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
if (OldMD && OldMD->isConst()) {
const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
EPI.TypeQuals |= Qualifiers::Const;
FT = Context.getFunctionType(FPT->getReturnType(),
FPT->getParamTypes(), EPI);
}
}
TemplateArgumentListInfo Args;
if (ExplicitTemplateArgs)
Args = *ExplicitTemplateArgs;
// C++ [temp.expl.spec]p11:
// A trailing template-argument can be left unspecified in the
// template-id naming an explicit function template specialization
// provided it can be deduced from the function argument type.
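// For example, with invented names:
//
//   template<typename T> void sort(T*, T*);
//   template<> void sort<>(char*, char*); // T deduced as char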
// Perform template argument deduction to determine whether we may be
// specializing this template.
// FIXME: It is somewhat wasteful to build
TemplateDeductionInfo Info(FailedCandidates.getLocation());
FunctionDecl *Specialization = nullptr;
if (TemplateDeductionResult TDK = DeduceTemplateArguments(
cast<FunctionTemplateDecl>(FunTmpl->getFirstDecl()),
ExplicitTemplateArgs ? &Args : nullptr, FT, Specialization,
Info)) {
// Template argument deduction failed; record why it failed, so
// that we can provide nifty diagnostics.
FailedCandidates.addCandidate().set(
I.getPair(), FunTmpl->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, TDK, Info));
(void)TDK;
continue;
}
// Target attributes are part of the cuda function signature, so
// the deduced template's cuda target must match that of the
// specialization. Given that C++ template deduction does not
// take target attributes into account, we reject candidates
// here that have a different target.
if (LangOpts.CUDA &&
IdentifyCUDATarget(Specialization,
/* IgnoreImplicitHDAttributes = */ true) !=
IdentifyCUDATarget(FD, /* IgnoreImplicitHDAttributes = */ true)) {
FailedCandidates.addCandidate().set(
I.getPair(), FunTmpl->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
continue;
}
// Record this candidate.
if (ExplicitTemplateArgs)
ConvertedTemplateArgs[Specialization] = std::move(Args);
Candidates.addDecl(Specialization, I.getAccess());
}
}
// Find the most specialized function template.
UnresolvedSetIterator Result = getMostSpecialized(
Candidates.begin(), Candidates.end(), FailedCandidates,
FD->getLocation(),
PDiag(diag::err_function_template_spec_no_match) << FD->getDeclName(),
PDiag(diag::err_function_template_spec_ambiguous)
<< FD->getDeclName() << (ExplicitTemplateArgs != nullptr),
PDiag(diag::note_function_template_spec_matched));
if (Result == Candidates.end())
return true;
// Ignore access information; it doesn't figure into redeclaration checking.
FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
FunctionTemplateSpecializationInfo *SpecInfo
= Specialization->getTemplateSpecializationInfo();
assert(SpecInfo && "Function template specialization info missing?");
// Note: do not overwrite location info if previous template
// specialization kind was explicit.
TemplateSpecializationKind TSK = SpecInfo->getTemplateSpecializationKind();
if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation) {
Specialization->setLocation(FD->getLocation());
Specialization->setLexicalDeclContext(FD->getLexicalDeclContext());
// C++11 [dcl.constexpr]p1: An explicit specialization of a constexpr
// function can differ from the template declaration with respect to
// the constexpr specifier.
// FIXME: We need an update record for this AST mutation.
// FIXME: What if there are multiple such prior declarations (for instance,
// from different modules)?
Specialization->setConstexpr(FD->isConstexpr());
}
// FIXME: Check if the prior specialization has a point of instantiation.
// If so, we have run afoul of .
// If this is a friend declaration, then we're not really declaring
// an explicit specialization.
bool isFriend = (FD->getFriendObjectKind() != Decl::FOK_None);
// Check the scope of this explicit specialization.
if (!isFriend &&
CheckTemplateSpecializationScope(*this,
Specialization->getPrimaryTemplate(),
Specialization, FD->getLocation(),
false))
return true;
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
// explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
// instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
bool HasNoEffect = false;
if (!isFriend &&
CheckSpecializationInstantiationRedecl(FD->getLocation(),
TSK_ExplicitSpecialization,
Specialization,
SpecInfo->getTemplateSpecializationKind(),
SpecInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
// Mark the prior declaration as an explicit specialization, so that later
// clients know that this is an explicit specialization.
if (!isFriend) {
// Explicit specializations do not inherit '=delete' from their primary
// function template, so check whether the 'specialization' that was
// implicitly generated (during template argument deduction for partial
// ordering) from the most specialized of all the function templates that
// 'FD' could have been specializing has a 'deleted' definition. If so,
// first check that it really was implicitly generated during template
// argument deduction, by making sure it wasn't referenced, and then reset
// the deleted flag to not-deleted, so that we can inherit that information
// from 'FD'.
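// For example, with invented names:
//
//   template<typename T> void f(T) = delete;
//   template<> void f(int); // OK: the explicit specialization is not deleted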
if (Specialization->isDeleted() && !SpecInfo->isExplicitSpecialization() &&
!Specialization->getCanonicalDecl()->isReferenced()) {
// FIXME: This assert will not hold in the presence of modules.
assert(
Specialization->getCanonicalDecl() == Specialization &&
"This must be the only existing declaration of this specialization");
// FIXME: We need an update record for this AST mutation.
Specialization->setDeletedAsWritten(false);
}
// FIXME: We need an update record for this AST mutation.
SpecInfo->setTemplateSpecializationKind(TSK_ExplicitSpecialization);
MarkUnusedFileScopedDecl(Specialization);
}
// Turn the given function declaration into a function template
// specialization, with the template arguments from the previous
// specialization.
// Take copies of (semantic and syntactic) template argument lists.
const TemplateArgumentList* TemplArgs = new (Context)
TemplateArgumentList(Specialization->getTemplateSpecializationArgs());
FD->setFunctionTemplateSpecialization(
Specialization->getPrimaryTemplate(), TemplArgs, /*InsertPos=*/nullptr,
SpecInfo->getTemplateSpecializationKind(),
ExplicitTemplateArgs ? &ConvertedTemplateArgs[Specialization] : nullptr);
// A function template specialization inherits the target attributes
// of its template. (We require the attributes explicitly in the
// code to match, but a template may have implicit attributes by
// virtue e.g. of being constexpr, and it passes these implicit
// attributes on to its specializations.)
if (LangOpts.CUDA)
inheritCUDATargetAttrs(FD, *Specialization->getPrimaryTemplate());
// The "previous declaration" for this function template specialization is
// the prior function template specialization.
Previous.clear();
Previous.addDecl(Specialization);
return false;
}
/// Perform semantic analysis for the given non-template member
/// specialization.
///
/// This routine performs all of the semantic analysis required for an
/// explicit member function specialization. On successful completion,
/// the function declaration \p FD will become a member function
/// specialization.
///
/// \param Member the member declaration, which will be updated to become a
/// specialization.
///
/// \param Previous the set of declarations, one of which may be specialized
/// by this function specialization; the set will be modified to contain the
/// redeclared member.
bool
Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
assert(!isa<TemplateDecl>(Member) && "Only for non-template members");
// Try to find the member we are instantiating.
NamedDecl *FoundInstantiation = nullptr;
NamedDecl *Instantiation = nullptr;
NamedDecl *InstantiatedFrom = nullptr;
MemberSpecializationInfo *MSInfo = nullptr;
if (Previous.empty()) {
// Nowhere to look anyway.
} else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Member)) {
for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
I != E; ++I) {
NamedDecl *D = (*I)->getUnderlyingDecl();
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
QualType Adjusted = Function->getType();
if (!hasExplicitCallingConv(Adjusted))
Adjusted = adjustCCAndNoReturn(Adjusted, Method->getType());
+ // This doesn't handle deduced return types, but both function
+ // declarations should be undeduced at this point.
if (Context.hasSameType(Adjusted, Method->getType())) {
FoundInstantiation = *I;
Instantiation = Method;
InstantiatedFrom = Method->getInstantiatedFromMemberFunction();
MSInfo = Method->getMemberSpecializationInfo();
break;
}
}
}
} else if (isa<VarDecl>(Member)) {
VarDecl *PrevVar;
if (Previous.isSingleResult() &&
(PrevVar = dyn_cast<VarDecl>(Previous.getFoundDecl())))
if (PrevVar->isStaticDataMember()) {
FoundInstantiation = Previous.getRepresentativeDecl();
Instantiation = PrevVar;
InstantiatedFrom = PrevVar->getInstantiatedFromStaticDataMember();
MSInfo = PrevVar->getMemberSpecializationInfo();
}
} else if (isa<RecordDecl>(Member)) {
CXXRecordDecl *PrevRecord;
if (Previous.isSingleResult() &&
(PrevRecord = dyn_cast<CXXRecordDecl>(Previous.getFoundDecl()))) {
FoundInstantiation = Previous.getRepresentativeDecl();
Instantiation = PrevRecord;
InstantiatedFrom = PrevRecord->getInstantiatedFromMemberClass();
MSInfo = PrevRecord->getMemberSpecializationInfo();
}
} else if (isa<EnumDecl>(Member)) {
EnumDecl *PrevEnum;
if (Previous.isSingleResult() &&
(PrevEnum = dyn_cast<EnumDecl>(Previous.getFoundDecl()))) {
FoundInstantiation = Previous.getRepresentativeDecl();
Instantiation = PrevEnum;
InstantiatedFrom = PrevEnum->getInstantiatedFromMemberEnum();
MSInfo = PrevEnum->getMemberSpecializationInfo();
}
}
if (!Instantiation) {
// There is no previous declaration that matches. Since member
// specializations are always out-of-line, the caller will complain about
// this mismatch later.
return false;
}
// A member specialization in a friend declaration isn't really declaring
// an explicit specialization, just identifying a specific (possibly implicit)
// specialization. Don't change the template specialization kind.
//
// FIXME: Is this really valid? Other compilers reject.
if (Member->getFriendObjectKind() != Decl::FOK_None) {
// Preserve instantiation information.
if (InstantiatedFrom && isa<CXXMethodDecl>(Member)) {
cast<CXXMethodDecl>(Member)->setInstantiationOfMemberFunction(
cast<CXXMethodDecl>(InstantiatedFrom),
cast<CXXMethodDecl>(Instantiation)->getTemplateSpecializationKind());
} else if (InstantiatedFrom && isa<CXXRecordDecl>(Member)) {
cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
cast<CXXRecordDecl>(InstantiatedFrom),
cast<CXXRecordDecl>(Instantiation)->getTemplateSpecializationKind());
}
Previous.clear();
Previous.addDecl(FoundInstantiation);
return false;
}
// Make sure that this is a specialization of a member.
if (!InstantiatedFrom) {
Diag(Member->getLocation(), diag::err_spec_member_not_instantiated)
<< Member;
Diag(Instantiation->getLocation(), diag::note_specialized_decl);
return true;
}
// C++ [temp.expl.spec]p6:
// If a template, a member template or the member of a class template is
// explicitly specialized then that specialization shall be declared
// before the first use of that specialization that would cause an implicit
// instantiation to take place, in every translation unit in which such a
// use occurs; no diagnostic is required.
assert(MSInfo && "Member specialization info missing?");
bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(Member->getLocation(),
TSK_ExplicitSpecialization,
Instantiation,
MSInfo->getTemplateSpecializationKind(),
MSInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
// Check the scope of this explicit specialization.
if (CheckTemplateSpecializationScope(*this,
InstantiatedFrom,
Instantiation, Member->getLocation(),
false))
return true;
// Note that this member specialization is an "instantiation of" the
// corresponding member of the original template.
if (auto *MemberFunction = dyn_cast<FunctionDecl>(Member)) {
FunctionDecl *InstantiationFunction = cast<FunctionDecl>(Instantiation);
if (InstantiationFunction->getTemplateSpecializationKind() ==
TSK_ImplicitInstantiation) {
// Explicit specializations of member functions of class templates do not
// inherit '=delete' from the member function they are specializing.
if (InstantiationFunction->isDeleted()) {
// FIXME: This assert will not hold in the presence of modules.
assert(InstantiationFunction->getCanonicalDecl() ==
InstantiationFunction);
// FIXME: We need an update record for this AST mutation.
InstantiationFunction->setDeletedAsWritten(false);
}
}
MemberFunction->setInstantiationOfMemberFunction(
cast<CXXMethodDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
} else if (auto *MemberVar = dyn_cast<VarDecl>(Member)) {
MemberVar->setInstantiationOfStaticDataMember(
cast<VarDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
} else if (auto *MemberClass = dyn_cast<CXXRecordDecl>(Member)) {
MemberClass->setInstantiationOfMemberClass(
cast<CXXRecordDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
} else if (auto *MemberEnum = dyn_cast<EnumDecl>(Member)) {
MemberEnum->setInstantiationOfMemberEnum(
cast<EnumDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
} else {
llvm_unreachable("unknown member specialization kind");
}
// Save the caller the trouble of having to figure out which declaration
// this specialization matches.
Previous.clear();
Previous.addDecl(FoundInstantiation);
return false;
}
/// Complete the explicit specialization of a member of a class template by
/// updating the instantiated member to be marked as an explicit specialization.
///
/// \param OrigD The member declaration instantiated from the template.
/// \param Loc The location of the explicit specialization of the member.
template<typename DeclT>
static void completeMemberSpecializationImpl(Sema &S, DeclT *OrigD,
SourceLocation Loc) {
if (OrigD->getTemplateSpecializationKind() != TSK_ImplicitInstantiation)
return;
// FIXME: Inform AST mutation listeners of this AST mutation.
// FIXME: If there are multiple in-class declarations of the member (from
// multiple modules, or a declaration and later definition of a member type),
// should we update all of them?
OrigD->setTemplateSpecializationKind(TSK_ExplicitSpecialization);
OrigD->setLocation(Loc);
}
void Sema::CompleteMemberSpecialization(NamedDecl *Member,
LookupResult &Previous) {
NamedDecl *Instantiation = cast<NamedDecl>(Member->getCanonicalDecl());
if (Instantiation == Member)
return;
if (auto *Function = dyn_cast<CXXMethodDecl>(Instantiation))
completeMemberSpecializationImpl(*this, Function, Member->getLocation());
else if (auto *Var = dyn_cast<VarDecl>(Instantiation))
completeMemberSpecializationImpl(*this, Var, Member->getLocation());
else if (auto *Record = dyn_cast<CXXRecordDecl>(Instantiation))
completeMemberSpecializationImpl(*this, Record, Member->getLocation());
else if (auto *Enum = dyn_cast<EnumDecl>(Instantiation))
completeMemberSpecializationImpl(*this, Enum, Member->getLocation());
else
llvm_unreachable("unknown member specialization kind");
}
/// Check the scope of an explicit instantiation.
///
/// \returns true if a serious error occurs, false otherwise.
static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
SourceLocation InstLoc,
bool WasQualifiedName) {
DeclContext *OrigContext =
    D->getDeclContext()->getEnclosingNamespaceContext();
DeclContext *CurContext = S.CurContext->getRedeclContext();
if (CurContext->isRecord()) {
S.Diag(InstLoc, diag::err_explicit_instantiation_in_class)
<< D;
return true;
}
// C++11 [temp.explicit]p3:
// An explicit instantiation shall appear in an enclosing namespace of its
// template. If the name declared in the explicit instantiation is an
// unqualified name, the explicit instantiation shall appear in the
// namespace where its template is declared or, if that namespace is inline
// (7.3.1), any namespace from its enclosing namespace set.
//
// This is DR275, which we do not retroactively apply to C++98/03.
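// A minimal sketch of both cases, using invented names:
//
//   namespace N { template<typename T> void f(T); }
//   template void N::f<int>(int); // OK: qualified, enclosing namespace
//   using namespace N;
//   template void f<int>(int);    // ill-formed in C++11: wrong namespace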
if (WasQualifiedName) {
if (CurContext->Encloses(OrigContext))
return false;
} else {
if (CurContext->InEnclosingNamespaceSetOf(OrigContext))
return false;
}
if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(OrigContext)) {
if (WasQualifiedName)
S.Diag(InstLoc,
S.getLangOpts().CPlusPlus11?
diag::err_explicit_instantiation_out_of_scope :
diag::warn_explicit_instantiation_out_of_scope_0x)
<< D << NS;
else
S.Diag(InstLoc,
S.getLangOpts().CPlusPlus11?
diag::err_explicit_instantiation_unqualified_wrong_namespace :
diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x)
<< D << NS;
} else
S.Diag(InstLoc,
S.getLangOpts().CPlusPlus11?
diag::err_explicit_instantiation_must_be_global :
diag::warn_explicit_instantiation_must_be_global_0x)
<< D;
S.Diag(D->getLocation(), diag::note_explicit_instantiation_here);
return false;
}
/// Determine whether the given scope specifier has a template-id in it.
static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
if (!SS.isSet())
return false;
// C++11 [temp.explicit]p3:
// If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
// the class template specialization in the qualified-id for the member
// name shall be a simple-template-id.
//
// C++98 has the same restriction, just worded differently.
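// For example, with invented names:
//
//   template<typename T> struct X { void f() { } };
//   template void X<int>::f(); // OK: X<int> is a simple-template-id
//   typedef X<int> XI;
//   template void XI::f();     // diagnosed: XI is not a simple-template-id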
for (NestedNameSpecifier *NNS = SS.getScopeRep(); NNS;
NNS = NNS->getPrefix())
if (const Type *T = NNS->getAsType())
if (isa<TemplateSpecializationType>(T))
return true;
return false;
}
/// Make a dllexport or dllimport attr on a class template specialization take
/// effect.
static void dllExportImportClassTemplateSpecialization(
Sema &S, ClassTemplateSpecializationDecl *Def) {
auto *A = cast_or_null<InheritableAttr>(getDLLAttr(Def));
assert(A && "dllExportImportClassTemplateSpecialization called "
"on Def without dllexport or dllimport");
// We reject explicit instantiations in class scope, so there should
// never be any delayed exported classes to worry about.
assert(S.DelayedDllExportClasses.empty() &&
"delayed exports present at explicit instantiation");
S.checkClassLevelDLLAttribute(Def);
// Propagate attribute to base class templates.
for (auto &B : Def->bases()) {
if (auto *BT = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
B.getType()->getAsCXXRecordDecl()))
S.propagateDLLAttrToBaseClassTemplate(Def, A, BT, B.getLocStart());
}
S.referenceDLLExportedClassMethods();
}
// Explicit instantiation of a class template specialization.
DeclResult Sema::ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy TemplateD, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr) {
// Find the class template we're specializing
TemplateName Name = TemplateD.get();
TemplateDecl *TD = Name.getAsTemplateDecl();
// Check that the specialization uses the same tag kind as the
// original template.
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
assert(Kind != TTK_Enum &&
"Invalid enum tag in class template explicit instantiation!");
ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(TD);
if (!ClassTemplate) {
NonTagKind NTK = getNonTagTypeDeclKind(TD, Kind);
Diag(TemplateNameLoc, diag::err_tag_reference_non_tag) << TD << NTK << Kind;
Diag(TD->getLocation(), diag::note_previous_use);
return true;
}
if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
Kind, /*isDefinition*/false, KWLoc,
ClassTemplate->getIdentifier())) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< ClassTemplate
<< FixItHint::CreateReplacement(KWLoc,
ClassTemplate->getTemplatedDecl()->getKindName());
Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
diag::note_previous_use);
Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
}
// C++0x [temp.explicit]p2:
// There are two forms of explicit instantiation: an explicit instantiation
// definition and an explicit instantiation declaration. An explicit
// instantiation declaration begins with the extern keyword. [...]
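// For example, with invented names:
//
//   template struct X<int>;        // explicit instantiation definition
//   extern template struct X<int>; // explicit instantiation declaration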
TemplateSpecializationKind TSK = ExternLoc.isInvalid()
? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
if (TSK == TSK_ExplicitInstantiationDeclaration) {
// Check for dllexport class template instantiation declarations.
for (const ParsedAttr &AL : Attr) {
if (AL.getKind() == ParsedAttr::AT_DLLExport) {
Diag(ExternLoc,
diag::warn_attribute_dllexport_explicit_instantiation_decl);
Diag(AL.getLoc(), diag::note_attribute);
break;
}
}
if (auto *A = ClassTemplate->getTemplatedDecl()->getAttr<DLLExportAttr>()) {
Diag(ExternLoc,
diag::warn_attribute_dllexport_explicit_instantiation_decl);
Diag(A->getLocation(), diag::note_attribute);
}
}
// In MSVC mode, dllimported explicit instantiation definitions are treated as
// instantiation declarations for most purposes.
bool DLLImportExplicitInstantiationDef = false;
if (TSK == TSK_ExplicitInstantiationDefinition &&
Context.getTargetInfo().getCXXABI().isMicrosoft()) {
// Check for dllimport class template instantiation definitions.
bool DLLImport =
ClassTemplate->getTemplatedDecl()->getAttr<DLLImportAttr>();
for (const ParsedAttr &AL : Attr) {
if (AL.getKind() == ParsedAttr::AT_DLLImport)
DLLImport = true;
if (AL.getKind() == ParsedAttr::AT_DLLExport) {
// dllexport trumps dllimport here.
DLLImport = false;
break;
}
}
if (DLLImport) {
TSK = TSK_ExplicitInstantiationDeclaration;
DLLImportExplicitInstantiationDef = true;
}
}
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
// Check that the template argument list is well-formed for this
// template.
SmallVector<TemplateArgument, 4> Converted;
if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
TemplateArgs, false, Converted))
return true;
// Find the class template specialization declaration that
// corresponds to these arguments.
void *InsertPos = nullptr;
ClassTemplateSpecializationDecl *PrevDecl
= ClassTemplate->findSpecialization(Converted, InsertPos);
TemplateSpecializationKind PrevDecl_TSK
= PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared;
// C++0x [temp.explicit]p2:
// [...] An explicit instantiation shall appear in an enclosing
// namespace of its template. [...]
//
// This is C++ DR 275.
if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc,
SS.isSet()))
return true;
ClassTemplateSpecializationDecl *Specialization = nullptr;
bool HasNoEffect = false;
if (PrevDecl) {
if (CheckSpecializationInstantiationRedecl(TemplateNameLoc, TSK,
PrevDecl, PrevDecl_TSK,
PrevDecl->getPointOfInstantiation(),
HasNoEffect))
return PrevDecl;
// Even though HasNoEffect == true means that this explicit instantiation
// has no effect on semantics, we go on to put its syntax in the AST.
if (PrevDecl_TSK == TSK_ImplicitInstantiation ||
PrevDecl_TSK == TSK_Undeclared) {
// Since the only prior class template specialization with these
// arguments was referenced but not declared, reuse that
// declaration node as our own, updating the source location
// for the template name to reflect our new declaration.
// (Other source locations will be updated later.)
Specialization = PrevDecl;
Specialization->setLocation(TemplateNameLoc);
PrevDecl = nullptr;
}
if (PrevDecl_TSK == TSK_ExplicitInstantiationDeclaration &&
DLLImportExplicitInstantiationDef) {
// The new specialization might add a dllimport attribute.
HasNoEffect = false;
}
}
if (!Specialization) {
// Create a new class template specialization declaration node for
// this explicit specialization.
Specialization
= ClassTemplateSpecializationDecl::Create(Context, Kind,
ClassTemplate->getDeclContext(),
KWLoc, TemplateNameLoc,
ClassTemplate,
Converted,
PrevDecl);
SetNestedNameSpecifier(Specialization, SS);
if (!HasNoEffect && !PrevDecl) {
// Insert the new specialization.
ClassTemplate->AddSpecialization(Specialization, InsertPos);
}
}
// Build the fully-sugared type for this explicit instantiation as
// the user wrote in the explicit instantiation itself. This means
// that we'll pretty-print the type retrieved from the
// specialization's declaration the way that the user actually wrote
// the explicit instantiation, rather than formatting the name based
// on the "canonical" representation used to store the template
// arguments in the specialization.
TypeSourceInfo *WrittenTy
= Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
TemplateArgs,
Context.getTypeDeclType(Specialization));
Specialization->setTypeAsWritten(WrittenTy);
// Set source locations for keywords.
Specialization->setExternLoc(ExternLoc);
Specialization->setTemplateKeywordLoc(TemplateLoc);
Specialization->setBraceRange(SourceRange());
bool PreviouslyDLLExported = Specialization->hasAttr<DLLExportAttr>();
ProcessDeclAttributeList(S, Specialization, Attr);
// Add the explicit instantiation into its lexical context. However,
// since explicit instantiations are never found by name lookup, we
// just put it into the declaration context directly.
Specialization->setLexicalDeclContext(CurContext);
CurContext->addDecl(Specialization);
// Syntax is now OK, so return if it has no other effect on semantics.
if (HasNoEffect) {
// Set the template specialization kind.
Specialization->setTemplateSpecializationKind(TSK);
return Specialization;
}
// C++ [temp.explicit]p3:
// A definition of a class template or class member template
// shall be in scope at the point of the explicit instantiation of
// the class template or class member template.
//
// This check comes when we actually try to perform the
// instantiation.
ClassTemplateSpecializationDecl *Def
= cast_or_null<ClassTemplateSpecializationDecl>(
Specialization->getDefinition());
if (!Def)
InstantiateClassTemplateSpecialization(TemplateNameLoc, Specialization, TSK);
else if (TSK == TSK_ExplicitInstantiationDefinition) {
MarkVTableUsed(TemplateNameLoc, Specialization, true);
Specialization->setPointOfInstantiation(Def->getPointOfInstantiation());
}
// Instantiate the members of this class template specialization.
Def = cast_or_null<ClassTemplateSpecializationDecl>(
Specialization->getDefinition());
if (Def) {
TemplateSpecializationKind Old_TSK = Def->getTemplateSpecializationKind();
// Fix a TSK_ExplicitInstantiationDeclaration followed by a
// TSK_ExplicitInstantiationDefinition
if (Old_TSK == TSK_ExplicitInstantiationDeclaration &&
(TSK == TSK_ExplicitInstantiationDefinition ||
DLLImportExplicitInstantiationDef)) {
// FIXME: Need to notify the ASTMutationListener that we did this.
Def->setTemplateSpecializationKind(TSK);
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
(Context.getTargetInfo().getCXXABI().isMicrosoft() ||
Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())) {
// In the MS ABI, an explicit instantiation definition can add a dll
// attribute to a template with a previous instantiation declaration.
// MinGW doesn't allow this.
auto *A = cast<InheritableAttr>(
getDLLAttr(Specialization)->clone(getASTContext()));
A->setInherited(true);
Def->addAttr(A);
dllExportImportClassTemplateSpecialization(*this, Def);
}
}
// Fix a TSK_ImplicitInstantiation followed by a
// TSK_ExplicitInstantiationDefinition
bool NewlyDLLExported =
!PreviouslyDLLExported && Specialization->hasAttr<DLLExportAttr>();
if (Old_TSK == TSK_ImplicitInstantiation && NewlyDLLExported &&
(Context.getTargetInfo().getCXXABI().isMicrosoft() ||
Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())) {
// In the MS ABI, an explicit instantiation definition can add a dll
// attribute to a template with a previous implicit instantiation.
// MinGW doesn't allow this. We limit clang to only adding dllexport, to
// avoid potentially strange codegen behavior. For example, if we extend
// this conditional to dllimport, and we have a source file calling a
// method on an implicitly instantiated template class instance and then
// declaring a dllimport explicit instantiation definition for the same
// template class, the codegen for the method call will not respect the
// dllimport, while it will with cl. The Def will already have the DLL
// attribute, since the Def and Specialization will be the same in the
// case of Old_TSK == TSK_ImplicitInstantiation, and we already added the
// attribute to the Specialization; we just need to make it take effect.
assert(Def == Specialization &&
"Def and Specialization should match for implicit instantiation");
dllExportImportClassTemplateSpecialization(*this, Def);
}
// Set the template specialization kind. Make sure it is set before
// instantiating the members which will trigger ASTConsumer callbacks.
Specialization->setTemplateSpecializationKind(TSK);
InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK);
} else {
// Set the template specialization kind.
Specialization->setTemplateSpecializationKind(TSK);
}
return Specialization;
}
// Explicit instantiation of a member class of a class template.
DeclResult
Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc, unsigned TagSpec,
SourceLocation KWLoc, CXXScopeSpec &SS,
IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr) {
bool Owned = false;
bool IsDependent = false;
Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference,
KWLoc, SS, Name, NameLoc, Attr, AS_none,
/*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent,
SourceLocation(), false, TypeResult(),
/*IsTypeSpecifier*/false,
/*IsTemplateParamOrArg*/false);
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
return true;
TagDecl *Tag = cast<TagDecl>(TagD);
assert(!Tag->isEnum() && "shouldn't see enumerations here");
if (Tag->isInvalidDecl())
return true;
CXXRecordDecl *Record = cast<CXXRecordDecl>(Tag);
CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
if (!Pattern) {
Diag(TemplateLoc, diag::err_explicit_instantiation_nontemplate_type)
<< Context.getTypeDeclType(Record);
Diag(Record->getLocation(), diag::note_nontemplate_decl_here);
return true;
}
// C++0x [temp.explicit]p2:
// If the explicit instantiation is for a class or member class, the
// elaborated-type-specifier in the declaration shall include a
// simple-template-id.
//
// C++98 has the same restriction, just worded differently.
if (!ScopeSpecifierHasTemplateId(SS))
Diag(TemplateLoc, diag::ext_explicit_instantiation_without_qualified_id)
<< Record << SS.getRange();
// C++0x [temp.explicit]p2:
// There are two forms of explicit instantiation: an explicit instantiation
// definition and an explicit instantiation declaration. An explicit
// instantiation declaration begins with the extern keyword. [...]
TemplateSpecializationKind TSK
= ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
// C++0x [temp.explicit]p2:
// [...] An explicit instantiation shall appear in an enclosing
// namespace of its template. [...]
//
// This is C++ DR 275.
CheckExplicitInstantiationScope(*this, Record, NameLoc, true);
// Verify that it is okay to explicitly instantiate here.
CXXRecordDecl *PrevDecl
= cast_or_null<CXXRecordDecl>(Record->getPreviousDecl());
if (!PrevDecl && Record->getDefinition())
PrevDecl = Record;
if (PrevDecl) {
MemberSpecializationInfo *MSInfo = PrevDecl->getMemberSpecializationInfo();
bool HasNoEffect = false;
assert(MSInfo && "No member specialization information?");
if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK,
PrevDecl,
MSInfo->getTemplateSpecializationKind(),
MSInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
if (HasNoEffect)
return TagD;
}
CXXRecordDecl *RecordDef
= cast_or_null<CXXRecordDecl>(Record->getDefinition());
if (!RecordDef) {
// C++ [temp.explicit]p3:
// A definition of a member class of a class template shall be in scope
// at the point of an explicit instantiation of the member class.
CXXRecordDecl *Def
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
if (!Def) {
Diag(TemplateLoc, diag::err_explicit_instantiation_undefined_member)
<< 0 << Record->getDeclName() << Record->getDeclContext();
Diag(Pattern->getLocation(), diag::note_forward_declaration)
<< Pattern;
return true;
} else {
if (InstantiateClass(NameLoc, Record, Def,
getTemplateInstantiationArgs(Record),
TSK))
return true;
RecordDef = cast_or_null<CXXRecordDecl>(Record->getDefinition());
if (!RecordDef)
return true;
}
}
// Instantiate all of the members of the class.
InstantiateClassMembers(NameLoc, RecordDef,
getTemplateInstantiationArgs(Record), TSK);
if (TSK == TSK_ExplicitInstantiationDefinition)
MarkVTableUsed(NameLoc, RecordDef, true);
// FIXME: We don't have any representation for explicit instantiations of
// member classes. Such a representation is not needed for compilation, but it
// should be available for clients that want to see all of the declarations in
// the source code.
return TagD;
}
DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D) {
// Explicit instantiations always require a name.
// TODO: check if/when DNInfo should replace Name.
DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
DeclarationName Name = NameInfo.getName();
if (!Name) {
if (!D.isInvalidType())
Diag(D.getDeclSpec().getLocStart(),
diag::err_explicit_instantiation_requires_name)
<< D.getDeclSpec().getSourceRange()
<< D.getSourceRange();
return true;
}
// The scope passed in may not be a decl scope. Zip up the scope tree until
// we find one that is.
while ((S->getFlags() & Scope::DeclScope) == 0 ||
(S->getFlags() & Scope::TemplateParamScope) != 0)
S = S->getParent();
// Determine the type of the declaration.
TypeSourceInfo *T = GetTypeForDeclarator(D, S);
QualType R = T->getType();
if (R.isNull())
return true;
// C++ [dcl.stc]p1:
// A storage-class-specifier shall not be specified in [...] an explicit
// instantiation (14.7.2) directive.
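// For example, with invented names:
//
//   template<typename T> struct X { static T member; };
//   template int X<int>::member;        // OK
//   template static int X<int>::member; // error: storage class specified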
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_of_typedef)
<< Name;
return true;
} else if (D.getDeclSpec().getStorageClassSpec()
!= DeclSpec::SCS_unspecified) {
// Complain about, then remove, the storage class specifier.
Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_storage_class)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
D.getMutableDeclSpec().ClearStorageClassSpecs();
}
// C++0x [temp.explicit]p1:
// [...] An explicit instantiation of a function template shall not use the
// inline or constexpr specifiers.
// Presumably, this also applies to member functions of class templates as
// well.
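// For example, with invented names:
//
//   template<typename T> inline void f(T) { }
//   template inline void f(int); // error in C++11: 'inline' not permitted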
if (D.getDeclSpec().isInlineSpecified())
Diag(D.getDeclSpec().getInlineSpecLoc(),
getLangOpts().CPlusPlus11 ?
diag::err_explicit_instantiation_inline :
diag::warn_explicit_instantiation_inline_0x)
<< FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
if (D.getDeclSpec().isConstexprSpecified() && R->isFunctionType())
// FIXME: Add a fix-it to remove the 'constexpr' and add a 'const' if one is
// not already specified.
Diag(D.getDeclSpec().getConstexprSpecLoc(),
diag::err_explicit_instantiation_constexpr);
// A deduction guide is not on the list of entities that can be explicitly
// instantiated.
if (Name.getNameKind() == DeclarationName::CXXDeductionGuideName) {
Diag(D.getDeclSpec().getLocStart(), diag::err_deduction_guide_specialized)
<< /*explicit instantiation*/ 0;
return true;
}
// C++0x [temp.explicit]p2:
// There are two forms of explicit instantiation: an explicit instantiation
// definition and an explicit instantiation declaration. An explicit
// instantiation declaration begins with the extern keyword. [...]
TemplateSpecializationKind TSK
= ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
: TSK_ExplicitInstantiationDeclaration;
LookupResult Previous(*this, NameInfo, LookupOrdinaryName);
LookupParsedName(Previous, S, &D.getCXXScopeSpec());
if (!R->isFunctionType()) {
// C++ [temp.explicit]p1:
// A [...] static data member of a class template can be explicitly
// instantiated from the member definition associated with its class
// template.
// C++1y [temp.explicit]p1:
// A [...] variable [...] template specialization can be explicitly
// instantiated from its template.
if (Previous.isAmbiguous())
return true;
VarDecl *Prev = Previous.getAsSingle<VarDecl>();
VarTemplateDecl *PrevTemplate = Previous.getAsSingle<VarTemplateDecl>();
if (!PrevTemplate) {
if (!Prev || !Prev->isStaticDataMember()) {
// We expect to see a static data member here.
Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_not_known)
<< Name;
for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
P != PEnd; ++P)
Diag((*P)->getLocation(), diag::note_explicit_instantiation_here);
return true;
}
if (!Prev->getInstantiatedFromStaticDataMember()) {
// FIXME: Check for explicit specialization?
Diag(D.getIdentifierLoc(),
diag::err_explicit_instantiation_data_member_not_instantiated)
<< Prev;
Diag(Prev->getLocation(), diag::note_explicit_instantiation_here);
// FIXME: Can we provide a note showing where this was declared?
return true;
}
} else {
// Explicitly instantiate a variable template.
// C++1y [dcl.spec.auto]p6:
// ... A program that uses auto or decltype(auto) in a context not
// explicitly allowed in this section is ill-formed.
//
// This includes auto-typed variable template instantiations.
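// For example, with invented names:
//
//   template<typename T> auto var = T();
//   template auto var<int>; // error: 'auto' is not allowed here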
if (R->isUndeducedType()) {
Diag(T->getTypeLoc().getLocStart(),
diag::err_auto_not_allowed_var_inst);
return true;
}
if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId) {
// C++1y [temp.explicit]p3:
// If the explicit instantiation is for a variable, the unqualified-id
// in the declaration shall be a template-id.
Diag(D.getIdentifierLoc(),
diag::err_explicit_instantiation_without_template_id)
<< PrevTemplate;
Diag(PrevTemplate->getLocation(),
diag::note_explicit_instantiation_here);
return true;
}
// Translate the parser's template argument list into our AST format.
TemplateArgumentListInfo TemplateArgs =
makeTemplateArgumentListInfo(*this, *D.getName().TemplateId);
DeclResult Res = CheckVarTemplateId(PrevTemplate, TemplateLoc,
D.getIdentifierLoc(), TemplateArgs);
if (Res.isInvalid())
return true;
// Ignore access control bits; we don't need them for redeclaration
// checking.
Prev = cast<VarDecl>(Res.get());
}
// C++0x [temp.explicit]p2:
// If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
// the class template specialization in the qualified-id for the member
// name shall be a simple-template-id.
//
// C++98 has the same restriction, just worded differently.
//
// This does not apply to variable template specializations, where the
// template-id is in the unqualified-id instead.
if (!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()) && !PrevTemplate)
Diag(D.getIdentifierLoc(),
diag::ext_explicit_instantiation_without_qualified_id)
<< Prev << D.getCXXScopeSpec().getRange();
// Check the scope of this explicit instantiation.
CheckExplicitInstantiationScope(*this, Prev, D.getIdentifierLoc(), true);
// Verify that it is okay to explicitly instantiate here.
TemplateSpecializationKind PrevTSK = Prev->getTemplateSpecializationKind();
SourceLocation POI = Prev->getPointOfInstantiation();
bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, Prev,
PrevTSK, POI, HasNoEffect))
return true;
if (!HasNoEffect) {
// Instantiate static data member or variable template.
Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
if (PrevTemplate) {
// Merge attributes.
ProcessDeclAttributeList(S, Prev, D.getDeclSpec().getAttributes());
}
if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateVariableDefinition(D.getIdentifierLoc(), Prev);
}
// Check the new variable specialization against the parsed input.
if (PrevTemplate && Prev && !Context.hasSameType(Prev->getType(), R)) {
Diag(T->getTypeLoc().getLocStart(),
diag::err_invalid_var_template_spec_type)
<< 0 << PrevTemplate << R << Prev->getType();
Diag(PrevTemplate->getLocation(), diag::note_template_declared_here)
<< 2 << PrevTemplate->getDeclName();
return true;
}
// FIXME: Create an ExplicitInstantiation node?
return (Decl*) nullptr;
}
// If the declarator is a template-id, translate the parser's template
// argument list into our AST format.
bool HasExplicitTemplateArgs = false;
TemplateArgumentListInfo TemplateArgs;
if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
TemplateArgs = makeTemplateArgumentListInfo(*this, *D.getName().TemplateId);
HasExplicitTemplateArgs = true;
}
// C++ [temp.explicit]p1:
// A [...] function [...] can be explicitly instantiated from its template.
// A member function [...] of a class template can be explicitly
// instantiated from the member definition associated with its class
// template.
UnresolvedSet<8> TemplateMatches;
FunctionDecl *NonTemplateMatch = nullptr;
TemplateSpecCandidateSet FailedCandidates(D.getIdentifierLoc());
for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
P != PEnd; ++P) {
NamedDecl *Prev = *P;
if (!HasExplicitTemplateArgs) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Prev)) {
QualType Adjusted = adjustCCAndNoReturn(R, Method->getType(),
/*AdjustExceptionSpec*/true);
if (Context.hasSameUnqualifiedType(Method->getType(), Adjusted)) {
if (Method->getPrimaryTemplate()) {
TemplateMatches.addDecl(Method, P.getAccess());
} else {
// FIXME: Can this assert ever happen? Needs a test.
assert(!NonTemplateMatch && "Multiple NonTemplateMatches");
NonTemplateMatch = Method;
}
}
}
}
FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Prev);
if (!FunTmpl)
continue;
TemplateDeductionInfo Info(FailedCandidates.getLocation());
FunctionDecl *Specialization = nullptr;
if (TemplateDeductionResult TDK
= DeduceTemplateArguments(FunTmpl,
(HasExplicitTemplateArgs ? &TemplateArgs
: nullptr),
R, Specialization, Info)) {
// Keep track of almost-matches.
FailedCandidates.addCandidate()
.set(P.getPair(), FunTmpl->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, TDK, Info));
(void)TDK;
continue;
}
// Target attributes are part of the cuda function signature, so
// the cuda target of the instantiated function must match that of its
// template. Given that C++ template deduction does not take
// target attributes into account, we reject candidates here that
// have a different target.
if (LangOpts.CUDA &&
IdentifyCUDATarget(Specialization,
/* IgnoreImplicitHDAttributes = */ true) !=
IdentifyCUDATarget(D.getDeclSpec().getAttributes())) {
FailedCandidates.addCandidate().set(
P.getPair(), FunTmpl->getTemplatedDecl(),
MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
continue;
}
TemplateMatches.addDecl(Specialization, P.getAccess());
}
FunctionDecl *Specialization = NonTemplateMatch;
if (!Specialization) {
// Find the most specialized function template specialization.
UnresolvedSetIterator Result = getMostSpecialized(
TemplateMatches.begin(), TemplateMatches.end(), FailedCandidates,
D.getIdentifierLoc(),
PDiag(diag::err_explicit_instantiation_not_known) << Name,
PDiag(diag::err_explicit_instantiation_ambiguous) << Name,
PDiag(diag::note_explicit_instantiation_candidate));
if (Result == TemplateMatches.end())
return true;
// Ignore access control bits; we don't need them for redeclaration checking.
Specialization = cast<FunctionDecl>(*Result);
}
// C++11 [except.spec]p4
// In an explicit instantiation an exception-specification may be specified,
// but is not required.
// If an exception-specification is specified in an explicit instantiation
// directive, it shall be compatible with the exception-specifications of
// other declarations of that function.
if (auto *FPT = R->getAs<FunctionProtoType>())
if (FPT->hasExceptionSpec()) {
unsigned DiagID =
diag::err_mismatched_exception_spec_explicit_instantiation;
if (getLangOpts().MicrosoftExt)
DiagID = diag::ext_mismatched_exception_spec_explicit_instantiation;
bool Result = CheckEquivalentExceptionSpec(
PDiag(DiagID) << Specialization->getType(),
PDiag(diag::note_explicit_instantiation_here),
Specialization->getType()->getAs<FunctionProtoType>(),
Specialization->getLocation(), FPT, D.getLocStart());
// In Microsoft mode, mismatching exception specifications just cause a
// warning.
if (!getLangOpts().MicrosoftExt && Result)
return true;
}
if (Specialization->getTemplateSpecializationKind() == TSK_Undeclared) {
Diag(D.getIdentifierLoc(),
diag::err_explicit_instantiation_member_function_not_instantiated)
<< Specialization
<< (Specialization->getTemplateSpecializationKind() ==
TSK_ExplicitSpecialization);
Diag(Specialization->getLocation(), diag::note_explicit_instantiation_here);
return true;
}
FunctionDecl *PrevDecl = Specialization->getPreviousDecl();
if (!PrevDecl && Specialization->isThisDeclarationADefinition())
PrevDecl = Specialization;
if (PrevDecl) {
bool HasNoEffect = false;
if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK,
PrevDecl,
PrevDecl->getTemplateSpecializationKind(),
PrevDecl->getPointOfInstantiation(),
HasNoEffect))
return true;
// FIXME: We may still want to build some representation of this
// explicit specialization.
if (HasNoEffect)
return (Decl*) nullptr;
}
ProcessDeclAttributeList(S, Specialization, D.getDeclSpec().getAttributes());
// In MSVC mode, dllimported explicit instantiation definitions are treated as
// instantiation declarations.
if (TSK == TSK_ExplicitInstantiationDefinition &&
Specialization->hasAttr<DLLImportAttr>() &&
Context.getTargetInfo().getCXXABI().isMicrosoft())
TSK = TSK_ExplicitInstantiationDeclaration;
Specialization->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
if (Specialization->isDefined()) {
// Let the ASTConsumer know that this function has been explicitly
// instantiated now, and its linkage might have changed.
Consumer.HandleTopLevelDecl(DeclGroupRef(Specialization));
} else if (TSK == TSK_ExplicitInstantiationDefinition)
InstantiateFunctionDefinition(D.getIdentifierLoc(), Specialization);
// C++0x [temp.explicit]p2:
// If the explicit instantiation is for a member function, a member class
// or a static data member of a class template specialization, the name of
// the class template specialization in the qualified-id for the member
// name shall be a simple-template-id.
//
// C++98 has the same restriction, just worded differently.
FunctionTemplateDecl *FunTmpl = Specialization->getPrimaryTemplate();
if (D.getName().getKind() != UnqualifiedIdKind::IK_TemplateId && !FunTmpl &&
D.getCXXScopeSpec().isSet() &&
!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
Diag(D.getIdentifierLoc(),
diag::ext_explicit_instantiation_without_qualified_id)
<< Specialization << D.getCXXScopeSpec().getRange();
CheckExplicitInstantiationScope(*this,
FunTmpl? (NamedDecl *)FunTmpl
: Specialization->getInstantiatedFromMemberFunction(),
D.getIdentifierLoc(),
D.getCXXScopeSpec().isSet());
// FIXME: Create some kind of ExplicitInstantiationDecl here.
return (Decl*) nullptr;
}
TypeResult
Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
const CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation TagLoc, SourceLocation NameLoc) {
// This has to hold, because SS is expected to be defined.
assert(Name && "Expected a name in a dependent tag");
NestedNameSpecifier *NNS = SS.getScopeRep();
if (!NNS)
return true;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
if (TUK == TUK_Declaration || TUK == TUK_Definition) {
Diag(NameLoc, diag::err_dependent_tag_decl)
<< (TUK == TUK_Definition) << Kind << SS.getRange();
return true;
}
// Create the resulting type.
ElaboratedTypeKeyword Kwd = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
QualType Result = Context.getDependentNameType(Kwd, NNS, Name);
// Create type-source location information for this type.
TypeLocBuilder TLB;
DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(Result);
TL.setElaboratedKeywordLoc(TagLoc);
TL.setQualifierLoc(SS.getWithLocInContext(Context));
TL.setNameLoc(NameLoc);
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
TypeResult
Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc) {
if (SS.isInvalid())
return true;
if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
Diag(TypenameLoc,
getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_typename_outside_of_template :
diag::ext_typename_outside_of_template)
<< FixItHint::CreateRemoval(TypenameLoc);
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
QualType T = CheckTypenameType(TypenameLoc.isValid()? ETK_Typename : ETK_None,
TypenameLoc, QualifierLoc, II, IdLoc);
if (T.isNull())
return true;
TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
if (isa<DependentNameType>(T)) {
DependentNameTypeLoc TL = TSI->getTypeLoc().castAs<DependentNameTypeLoc>();
TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(QualifierLoc);
TL.setNameLoc(IdLoc);
} else {
ElaboratedTypeLoc TL = TSI->getTypeLoc().castAs<ElaboratedTypeLoc>();
TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(QualifierLoc);
TL.getNamedTypeLoc().castAs<TypeSpecTypeLoc>().setNameLoc(IdLoc);
}
return CreateParsedType(T, TSI);
}
TypeResult
Sema::ActOnTypenameType(Scope *S,
SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateIn,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) {
if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
Diag(TypenameLoc,
getLangOpts().CPlusPlus11 ?
diag::warn_cxx98_compat_typename_outside_of_template :
diag::ext_typename_outside_of_template)
<< FixItHint::CreateRemoval(TypenameLoc);
// Strangely, non-type results are not ignored by this lookup, so the
// program is ill-formed if it finds an injected-class-name.
if (TypenameLoc.isValid()) {
auto *LookupRD =
dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, false));
if (LookupRD && LookupRD->getIdentifier() == TemplateII) {
Diag(TemplateIILoc,
diag::ext_out_of_line_qualified_id_type_names_constructor)
<< TemplateII << 0 /*injected-class-name used as template name*/
<< (TemplateKWLoc.isValid() ? 1 : 0 /*'template'/'typename' keyword*/);
}
}
// Translate the parser's template argument list into our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
TemplateName Template = TemplateIn.get();
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
// Construct a dependent template specialization type.
assert(DTN && "dependent template has non-dependent name?");
assert(DTN->getQualifier() == SS.getScopeRep());
QualType T = Context.getDependentTemplateSpecializationType(ETK_Typename,
DTN->getQualifier(),
DTN->getIdentifier(),
TemplateArgs);
// Create source-location information for this type.
TypeLocBuilder Builder;
DependentTemplateSpecializationTypeLoc SpecTL
= Builder.push<DependentTemplateSpecializationTypeLoc>(T);
SpecTL.setElaboratedKeywordLoc(TypenameLoc);
SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateIILoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
QualType T = CheckTemplateIdType(Template, TemplateIILoc, TemplateArgs);
if (T.isNull())
return true;
// Provide source-location information for the template specialization type.
TypeLocBuilder Builder;
TemplateSpecializationTypeLoc SpecTL
= Builder.push<TemplateSpecializationTypeLoc>(T);
SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
SpecTL.setTemplateNameLoc(TemplateIILoc);
SpecTL.setLAngleLoc(LAngleLoc);
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
T = Context.getElaboratedType(ETK_Typename, SS.getScopeRep(), T);
ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(SS.getWithLocInContext(Context));
TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
return CreateParsedType(T, TSI);
}
/// Determine whether this failed name lookup should be treated as being
/// disabled by a usage of std::enable_if.
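///
/// A minimal illustrative sketch of the pattern being recognized (the names
/// and the condition are examples only):
/// \code
/// template<typename T>
/// typename std::enable_if<std::is_integral<T>::value, T>::type
/// half(T t) { return t / 2; }
/// \endcode
/// When the condition is false, enable_if<...> has no member named 'type',
/// and the lookup handled below fails.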
static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
SourceRange &CondRange, Expr *&Cond) {
// We must be looking for a ::type...
if (!II.isStr("type"))
return false;
// ... within an explicitly-written template specialization...
if (!NNS || !NNS.getNestedNameSpecifier()->getAsType())
return false;
TypeLoc EnableIfTy = NNS.getTypeLoc();
TemplateSpecializationTypeLoc EnableIfTSTLoc =
EnableIfTy.getAs<TemplateSpecializationTypeLoc>();
if (!EnableIfTSTLoc || EnableIfTSTLoc.getNumArgs() == 0)
return false;
const TemplateSpecializationType *EnableIfTST = EnableIfTSTLoc.getTypePtr();
// ... which names a complete class template declaration...
const TemplateDecl *EnableIfDecl =
EnableIfTST->getTemplateName().getAsTemplateDecl();
if (!EnableIfDecl || EnableIfTST->isIncompleteType())
return false;
// ... called "enable_if".
const IdentifierInfo *EnableIfII =
EnableIfDecl->getDeclName().getAsIdentifierInfo();
if (!EnableIfII || !EnableIfII->isStr("enable_if"))
return false;
// Assume the first template argument is the condition.
CondRange = EnableIfTSTLoc.getArgLoc(0).getSourceRange();
// Dig out the condition.
Cond = nullptr;
if (EnableIfTSTLoc.getArgLoc(0).getArgument().getKind()
!= TemplateArgument::Expression)
return true;
Cond = EnableIfTSTLoc.getArgLoc(0).getSourceExpression();
// Ignore Boolean literals; they add no value.
if (isa<CXXBoolLiteralExpr>(Cond->IgnoreParenCasts()))
Cond = nullptr;
return true;
}
/// Build the type that describes a C++ typename specifier,
/// e.g., "typename T::type".
QualType
Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc) {
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
DeclContext *Ctx = computeDeclContext(SS);
if (!Ctx) {
// If the nested-name-specifier is dependent and couldn't be
// resolved to a type, build a typename type.
assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
return Context.getDependentNameType(Keyword,
QualifierLoc.getNestedNameSpecifier(),
&II);
}
// If the nested-name-specifier refers to the current instantiation,
// the "typename" keyword itself is superfluous. In C++03, the
// program is actually ill-formed. However, DR 382 (in C++0x CD1)
// allows such extraneous "typename" keywords, and we retroactively
// apply this DR to C++03 code with only a warning. In any case we continue.
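// For example, inside the definition of a class template X<T>, writing
// 'typename X<T>::type' names a member of the current instantiation; the
// 'typename' keyword is redundant there but allowed per DR 382.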
if (RequireCompleteDeclContext(SS, Ctx))
return QualType();
DeclarationName Name(&II);
LookupResult Result(*this, Name, IILoc, LookupOrdinaryName);
LookupQualifiedName(Result, Ctx, SS);
unsigned DiagID = 0;
Decl *Referenced = nullptr;
switch (Result.getResultKind()) {
case LookupResult::NotFound: {
// If we're looking up 'type' within a template named 'enable_if', produce
// a more specific diagnostic.
SourceRange CondRange;
Expr *Cond = nullptr;
if (isEnableIf(QualifierLoc, II, CondRange, Cond)) {
// If we have a condition, narrow it down to the specific failed
// condition.
if (Cond) {
Expr *FailedCond;
std::string FailedDescription;
std::tie(FailedCond, FailedDescription) =
findFailedBooleanCondition(Cond, /*AllowTopLevelCond=*/true);
Diag(FailedCond->getExprLoc(),
diag::err_typename_nested_not_found_requirement)
<< FailedDescription
<< FailedCond->getSourceRange();
return QualType();
}
Diag(CondRange.getBegin(), diag::err_typename_nested_not_found_enable_if)
<< Ctx << CondRange;
return QualType();
}
DiagID = diag::err_typename_nested_not_found;
break;
}
case LookupResult::FoundUnresolvedValue: {
// We found a using declaration that is a value. Most likely, the using
// declaration itself is meant to have the 'typename' keyword.
SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(),
IILoc);
Diag(IILoc, diag::err_typename_refers_to_using_value_decl)
<< Name << Ctx << FullRange;
if (UnresolvedUsingValueDecl *Using
= dyn_cast<UnresolvedUsingValueDecl>(Result.getRepresentativeDecl())){
SourceLocation Loc = Using->getQualifierLoc().getBeginLoc();
Diag(Loc, diag::note_using_value_decl_missing_typename)
<< FixItHint::CreateInsertion(Loc, "typename ");
}
}
// Fall through to create a dependent typename type, from which we can recover
// better.
LLVM_FALLTHROUGH;
case LookupResult::NotFoundInCurrentInstantiation:
// Okay, it's a member of an unknown instantiation.
return Context.getDependentNameType(Keyword,
QualifierLoc.getNestedNameSpecifier(),
&II);
case LookupResult::Found:
if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
// C++ [class.qual]p2:
// In a lookup in which function names are not ignored and the
// nested-name-specifier nominates a class C, if the name specified
// after the nested-name-specifier, when looked up in C, is the
// injected-class-name of C [...] then the name is instead considered
// to name the constructor of class C.
//
// Unlike in an elaborated-type-specifier, function names are not ignored
// in typename-specifier lookup. However, they are ignored in all the
// contexts where we form a typename type with no keyword (that is, in
// mem-initializer-ids, base-specifiers, and elaborated-type-specifiers).
//
// FIXME: That's not strictly true: mem-initializer-id lookup does not
// ignore functions, but that appears to be an oversight.
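// For example, in 'A::A' (where A is a class), the name after '::' finds
// A's injected-class-name and is therefore considered to name A's
// constructor rather than the type A.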
auto *LookupRD = dyn_cast_or_null<CXXRecordDecl>(Ctx);
auto *FoundRD = dyn_cast<CXXRecordDecl>(Type);
if (Keyword == ETK_Typename && LookupRD && FoundRD &&
FoundRD->isInjectedClassName() &&
declaresSameEntity(LookupRD, cast<Decl>(FoundRD->getParent())))
Diag(IILoc, diag::ext_out_of_line_qualified_id_type_names_constructor)
<< &II << 1 << 0 /*'typename' keyword used*/;
// We found a type. Build an ElaboratedType, since the
// typename-specifier was just sugar.
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
return Context.getElaboratedType(Keyword,
QualifierLoc.getNestedNameSpecifier(),
Context.getTypeDeclType(Type));
}
// C++ [dcl.type.simple]p2:
// A type-specifier of the form
// typename[opt] nested-name-specifier[opt] template-name
// is a placeholder for a deduced class type [...].
if (getLangOpts().CPlusPlus17) {
if (auto *TD = getAsTypeTemplateDecl(Result.getFoundDecl())) {
return Context.getElaboratedType(
Keyword, QualifierLoc.getNestedNameSpecifier(),
Context.getDeducedTemplateSpecializationType(TemplateName(TD),
QualType(), false));
}
}
DiagID = diag::err_typename_nested_not_type;
Referenced = Result.getFoundDecl();
break;
case LookupResult::FoundOverloaded:
DiagID = diag::err_typename_nested_not_type;
Referenced = *Result.begin();
break;
case LookupResult::Ambiguous:
return QualType();
}
// If we get here, it's because name lookup did not find a
// type. Emit an appropriate diagnostic and return an error.
SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(),
IILoc);
Diag(IILoc, DiagID) << FullRange << Name << Ctx;
if (Referenced)
Diag(Referenced->getLocation(), diag::note_typename_refers_here)
<< Name;
return QualType();
}
namespace {
// See Sema::RebuildTypeInCurrentInstantiation
class CurrentInstantiationRebuilder
: public TreeTransform<CurrentInstantiationRebuilder> {
SourceLocation Loc;
DeclarationName Entity;
public:
typedef TreeTransform<CurrentInstantiationRebuilder> inherited;
CurrentInstantiationRebuilder(Sema &SemaRef,
SourceLocation Loc,
DeclarationName Entity)
: TreeTransform<CurrentInstantiationRebuilder>(SemaRef),
Loc(Loc), Entity(Entity) { }
/// Determine whether the given type \p T has already been
/// transformed.
///
/// For the purposes of type reconstruction, a type has already been
/// transformed if it is NULL or if it is not dependent.
bool AlreadyTransformed(QualType T) {
return T.isNull() || !T->isDependentType();
}
/// Returns the location of the entity whose type is being
/// rebuilt.
SourceLocation getBaseLocation() { return Loc; }
/// Returns the name of the entity whose type is being rebuilt.
DeclarationName getBaseEntity() { return Entity; }
/// Sets the "base" location and entity when that
/// information is known based on another transformation.
void setBase(SourceLocation Loc, DeclarationName Entity) {
this->Loc = Loc;
this->Entity = Entity;
}
ExprResult TransformLambdaExpr(LambdaExpr *E) {
// Lambdas never need to be transformed.
return E;
}
};
} // end anonymous namespace
/// Rebuilds a type within the context of the current instantiation.
///
/// The type \p T is part of the type of an out-of-line member definition of
/// a class template (or class template partial specialization) that was parsed
/// and constructed before we entered the scope of the class template (or
/// partial specialization thereof). This routine will rebuild that type now
/// that we have entered the declarator's scope, which may produce different
/// canonical types, e.g.,
///
/// \code
/// template<typename T>
/// struct X {
/// typedef T* pointer;
/// pointer data();
/// };
///
/// template<typename T>
/// typename X<T>::pointer X<T>::data() { ... }
/// \endcode
///
/// Here, the type "typename X<T>::pointer" will be created as a DependentNameType,
/// since we do not know that we can look into X<T> when we parsed the type.
/// This function will rebuild the type, performing the lookup of "pointer"
/// in X<T> and returning an ElaboratedType whose canonical type is the same
/// as the canonical type of T*, allowing the return types of the out-of-line
/// definition and the declaration to match.
TypeSourceInfo *Sema::RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name) {
if (!T || !T->getType()->isDependentType())
return T;
CurrentInstantiationRebuilder Rebuilder(*this, Loc, Name);
return Rebuilder.TransformType(T);
}
ExprResult Sema::RebuildExprInCurrentInstantiation(Expr *E) {
CurrentInstantiationRebuilder Rebuilder(*this, E->getExprLoc(),
DeclarationName());
return Rebuilder.TransformExpr(E);
}
bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
if (SS.isInvalid())
return true;
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
CurrentInstantiationRebuilder Rebuilder(*this, SS.getRange().getBegin(),
DeclarationName());
NestedNameSpecifierLoc Rebuilt
= Rebuilder.TransformNestedNameSpecifierLoc(QualifierLoc);
if (!Rebuilt)
return true;
SS.Adopt(Rebuilt);
return false;
}
/// Rebuild the template parameters now that we know we're in a current
/// instantiation.
bool Sema::RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params) {
for (unsigned I = 0, N = Params->size(); I != N; ++I) {
Decl *Param = Params->getParam(I);
// There is nothing to rebuild in a type parameter.
if (isa<TemplateTypeParmDecl>(Param))
continue;
// Rebuild the template parameter list of a template template parameter.
if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(Param)) {
if (RebuildTemplateParamsInCurrentInstantiation(
TTP->getTemplateParameters()))
return true;
continue;
}
// Rebuild the type of a non-type template parameter.
NonTypeTemplateParmDecl *NTTP = cast<NonTypeTemplateParmDecl>(Param);
TypeSourceInfo *NewTSI
= RebuildTypeInCurrentInstantiation(NTTP->getTypeSourceInfo(),
NTTP->getLocation(),
NTTP->getDeclName());
if (!NewTSI)
return true;
if (NewTSI != NTTP->getTypeSourceInfo()) {
NTTP->setTypeSourceInfo(NewTSI);
NTTP->setType(NewTSI->getType());
}
}
return false;
}
/// Produces a formatted string that describes the binding of
/// template parameters to template arguments.
std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args) {
return getTemplateArgumentBindingsText(Params, Args.data(), Args.size());
}
std::string
Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs) {
SmallString<128> Str;
llvm::raw_svector_ostream Out(Str);
if (!Params || Params->size() == 0 || NumArgs == 0)
return std::string();
for (unsigned I = 0, N = Params->size(); I != N; ++I) {
if (I >= NumArgs)
break;
if (I == 0)
Out << "[with ";
else
Out << ", ";
if (const IdentifierInfo *Id = Params->getParam(I)->getIdentifier()) {
Out << Id->getName();
} else {
Out << '$' << I;
}
Out << " = ";
Args[I].print(getPrintingPolicy(), Out);
}
Out << ']';
return Out.str();
}
void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks) {
if (!FD)
return;
auto LPT = llvm::make_unique<LateParsedTemplate>();
// Take tokens to avoid allocations
LPT->Toks.swap(Toks);
LPT->D = FnD;
LateParsedTemplateMap.insert(std::make_pair(FD, std::move(LPT)));
FD->setLateTemplateParsed(true);
}
void Sema::UnmarkAsLateParsedTemplate(FunctionDecl *FD) {
if (!FD)
return;
FD->setLateTemplateParsed(false);
}
bool Sema::IsInsideALocalClassWithinATemplateFunction() {
DeclContext *DC = CurContext;
while (DC) {
if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
const FunctionDecl *FD = RD->isLocalClass();
return (FD && FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate);
} else if (DC->isTranslationUnit() || DC->isNamespace())
return false;
DC = DC->getParent();
}
return false;
}
namespace {
/// Walk the path from which a declaration was instantiated, and check
/// that every explicit specialization along that path is visible. This enforces
/// C++ [temp.expl.spec]/6:
///
/// If a template, a member template or a member of a class template is
/// explicitly specialized then that specialization shall be declared before
/// the first use of that specialization that would cause an implicit
/// instantiation to take place, in every translation unit in which such a
/// use occurs; no diagnostic is required.
///
/// and also C++ [temp.class.spec]/1:
///
/// A partial specialization shall be declared before the first use of a
/// class template specialization that would make use of the partial
/// specialization as the result of an implicit or explicit instantiation
/// in every translation unit in which such a use occurs; no diagnostic is
/// required.
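/// For example (an illustrative sketch): if a module M declares
/// \code
/// template<> struct trait<int> { static constexpr int value = 1; };
/// \endcode
/// and trait<int> is then used in a translation unit that has not imported
/// M, the explicit specialization is not visible at the point of use, and
/// this checker diagnoses the missing import.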
class ExplicitSpecializationVisibilityChecker {
Sema &S;
SourceLocation Loc;
llvm::SmallVector<Module *, 8> Modules;
public:
ExplicitSpecializationVisibilityChecker(Sema &S, SourceLocation Loc)
: S(S), Loc(Loc) {}
void check(NamedDecl *ND) {
if (auto *FD = dyn_cast<FunctionDecl>(ND))
return checkImpl(FD);
if (auto *RD = dyn_cast<CXXRecordDecl>(ND))
return checkImpl(RD);
if (auto *VD = dyn_cast<VarDecl>(ND))
return checkImpl(VD);
if (auto *ED = dyn_cast<EnumDecl>(ND))
return checkImpl(ED);
}
private:
void diagnose(NamedDecl *D, bool IsPartialSpec) {
auto Kind = IsPartialSpec ? Sema::MissingImportKind::PartialSpecialization
: Sema::MissingImportKind::ExplicitSpecialization;
const bool Recover = true;
// If we got a custom set of modules (because only a subset of the
// declarations are interesting), use them, otherwise let
// diagnoseMissingImport intelligently pick some.
if (Modules.empty())
S.diagnoseMissingImport(Loc, D, Kind, Recover);
else
S.diagnoseMissingImport(Loc, D, D->getLocation(), Modules, Kind, Recover);
}
// Check a specific declaration. There are three problematic cases:
//
// 1) The declaration is an explicit specialization of a template
// specialization.
// 2) The declaration is an explicit specialization of a member of a
// templated class.
// 3) The declaration is an instantiation of a template, and that template
// is an explicit specialization of a member of a templated class.
//
// We don't need to go any deeper than that, as the instantiation of the
// surrounding class / etc is not triggered by whatever triggered this
// instantiation, and thus should be checked elsewhere.
template<typename SpecDecl>
void checkImpl(SpecDecl *Spec) {
bool IsHiddenExplicitSpecialization = false;
if (Spec->getTemplateSpecializationKind() == TSK_ExplicitSpecialization) {
IsHiddenExplicitSpecialization =
Spec->getMemberSpecializationInfo()
? !S.hasVisibleMemberSpecialization(Spec, &Modules)
: !S.hasVisibleExplicitSpecialization(Spec, &Modules);
} else {
checkInstantiated(Spec);
}
if (IsHiddenExplicitSpecialization)
diagnose(Spec->getMostRecentDecl(), false);
}
void checkInstantiated(FunctionDecl *FD) {
if (auto *TD = FD->getPrimaryTemplate())
checkTemplate(TD);
}
void checkInstantiated(CXXRecordDecl *RD) {
auto *SD = dyn_cast<ClassTemplateSpecializationDecl>(RD);
if (!SD)
return;
auto From = SD->getSpecializedTemplateOrPartial();
if (auto *TD = From.dyn_cast<ClassTemplateDecl *>())
checkTemplate(TD);
else if (auto *TD =
From.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
if (!S.hasVisibleDeclaration(TD))
diagnose(TD, true);
checkTemplate(TD);
}
}
void checkInstantiated(VarDecl *RD) {
auto *SD = dyn_cast<VarTemplateSpecializationDecl>(RD);
if (!SD)
return;
auto From = SD->getSpecializedTemplateOrPartial();
if (auto *TD = From.dyn_cast<VarTemplateDecl *>())
checkTemplate(TD);
else if (auto *TD =
From.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
if (!S.hasVisibleDeclaration(TD))
diagnose(TD, true);
checkTemplate(TD);
}
}
void checkInstantiated(EnumDecl *FD) {}
template<typename TemplDecl>
void checkTemplate(TemplDecl *TD) {
if (TD->isMemberSpecialization()) {
if (!S.hasVisibleMemberSpecialization(TD, &Modules))
diagnose(TD->getMostRecentDecl(), false);
}
}
};
} // end anonymous namespace
void Sema::checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec) {
if (!getLangOpts().Modules)
return;
ExplicitSpecializationVisibilityChecker(*this, Loc).check(Spec);
}
/// Check whether a template partial specialization that we've discovered
/// is hidden, and produce suitable diagnostics if so.
void Sema::checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec) {
llvm::SmallVector<Module *, 8> Modules;
if (!hasVisibleDeclaration(Spec, &Modules))
diagnoseMissingImport(Loc, Spec, Spec->getLocation(), Modules,
MissingImportKind::PartialSpecialization,
/*Recover*/true);
}
Index: projects/clang700-import/contrib/llvm/tools/clang
===================================================================
--- projects/clang700-import/contrib/llvm/tools/clang (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/clang (revision 340125)
Property changes on: projects/clang700-import/contrib/llvm/tools/clang
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /vendor/clang/dist-release_70:r338729-340124
Index: projects/clang700-import/contrib/llvm/tools/lld/COFF/Chunks.h
===================================================================
--- projects/clang700-import/contrib/llvm/tools/lld/COFF/Chunks.h (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/lld/COFF/Chunks.h (revision 340125)
@@ -1,429 +1,431 @@
//===- Chunks.h -------------------------------------------------*- C++ -*-===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLD_COFF_CHUNKS_H
#define LLD_COFF_CHUNKS_H
#include "Config.h"
#include "InputFiles.h"
#include "lld/Common/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/MC/StringTableBuilder.h"
#include "llvm/Object/COFF.h"
#include <utility>
#include <vector>
namespace lld {
namespace coff {
using llvm::COFF::ImportDirectoryTableEntry;
using llvm::object::COFFSymbolRef;
using llvm::object::SectionRef;
using llvm::object::coff_relocation;
using llvm::object::coff_section;
class Baserel;
class Defined;
class DefinedImportData;
class DefinedRegular;
class ObjFile;
class OutputSection;
class Symbol;
// Mask for permissions (discardable, writable, readable, executable, etc).
const uint32_t PermMask = 0xFE000000;
// Mask for section types (code, data, bss).
const uint32_t TypeMask = 0x000000E0;
// A Chunk represents a chunk of data that will occupy space in the
// output (if the resolver chose that). It may or may not be backed by
// a section of an input file. It could be linker-created data, or it may
// have no actual data at all (as with common or BSS symbols).
class Chunk {
public:
enum Kind { SectionKind, OtherKind };
Kind kind() const { return ChunkKind; }
virtual ~Chunk() = default;
// Returns the size of this chunk (even if this is a common or BSS chunk).
virtual size_t getSize() const = 0;
// Write this chunk to an mmap'ed file, assuming Buf is pointing to the
// beginning of the file. Because this function may use RVA values
// of other chunks for relocations, you need to set them properly
// before calling this function.
virtual void writeTo(uint8_t *Buf) const {}
// Called by the writer after an RVA is assigned, but before calling
// getSize().
virtual void finalizeContents() {}
// The writer sets and uses the addresses.
uint64_t getRVA() const { return RVA; }
void setRVA(uint64_t V) { RVA = V; }
// Returns true if this has non-zero data. BSS chunks return
// false. If false is returned, the space occupied by this chunk
// will be filled with zeros.
virtual bool hasData() const { return true; }
// Returns readable/writable/executable bits.
virtual uint32_t getOutputCharacteristics() const { return 0; }
// Returns the section name if this is a section chunk.
// It is illegal to call this function on non-section chunks.
virtual StringRef getSectionName() const {
llvm_unreachable("unimplemented getSectionName");
}
// An output section has pointers to chunks in the section, and each
// chunk has a back pointer to an output section.
void setOutputSection(OutputSection *O) { Out = O; }
OutputSection *getOutputSection() const { return Out; }
// Windows-specific.
// Collect all locations that contain absolute addresses for base relocations.
virtual void getBaserels(std::vector<Baserel> *Res) {}
// Returns a human-readable name of this chunk. Chunks are otherwise unnamed
// runs of bytes, so this is used only for logging or debugging.
virtual StringRef getDebugName() { return ""; }
// The alignment of this chunk. The writer uses the value.
uint32_t Alignment = 1;
protected:
Chunk(Kind K = OtherKind) : ChunkKind(K) {}
const Kind ChunkKind;
// The RVA of this chunk in the output. The writer sets a value.
uint64_t RVA = 0;
// The output section for this chunk.
OutputSection *Out = nullptr;
public:
// The offset from beginning of the output section. The writer sets a value.
uint64_t OutputSectionOff = 0;
};
// A chunk corresponding to a section of an input file.
class SectionChunk final : public Chunk {
// Identical COMDAT Folding feature accesses section internal data.
friend class ICF;
public:
class symbol_iterator : public llvm::iterator_adaptor_base<
symbol_iterator, const coff_relocation *,
std::random_access_iterator_tag, Symbol *> {
friend SectionChunk;
ObjFile *File;
symbol_iterator(ObjFile *File, const coff_relocation *I)
: symbol_iterator::iterator_adaptor_base(I), File(File) {}
public:
symbol_iterator() = default;
Symbol *operator*() const { return File->getSymbol(I->SymbolTableIndex); }
};
SectionChunk(ObjFile *File, const coff_section *Header);
static bool classof(const Chunk *C) { return C->kind() == SectionKind; }
size_t getSize() const override { return Header->SizeOfRawData; }
ArrayRef<uint8_t> getContents() const;
void writeTo(uint8_t *Buf) const override;
bool hasData() const override;
uint32_t getOutputCharacteristics() const override;
StringRef getSectionName() const override { return SectionName; }
void getBaserels(std::vector<Baserel> *Res) override;
bool isCOMDAT() const;
void applyRelX64(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
uint64_t P) const;
void applyRelX86(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
uint64_t P) const;
void applyRelARM(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
uint64_t P) const;
void applyRelARM64(uint8_t *Off, uint16_t Type, OutputSection *OS, uint64_t S,
uint64_t P) const;
// Called if the garbage collector decides not to include this chunk
// in the final output. It is supposed to print a log message to stdout.
void printDiscardedMessage() const;
// Adds COMDAT associative sections to this COMDAT section. A chunk
// and its children are treated as a group by the garbage collector.
void addAssociative(SectionChunk *Child);
StringRef getDebugName() override;
// Returns true if the chunk was not dropped by GC.
bool isLive() { return Live; }
// Used by the garbage collector.
void markLive() {
assert(Config->DoGC && "should only mark things live from GC");
assert(!isLive() && "Cannot mark an already live section!");
Live = true;
}
// True if this is a codeview debug info chunk. These will not be laid out in
// the image. Instead they will end up in the PDB, if one is requested.
bool isCodeView() const {
return SectionName == ".debug" || SectionName.startswith(".debug$");
}
// True if this is a DWARF debug info or exception handling chunk.
bool isDWARF() const {
return SectionName.startswith(".debug_") || SectionName == ".eh_frame";
}
// Allow iteration over the bodies of this chunk's relocated symbols.
llvm::iterator_range<symbol_iterator> symbols() const {
return llvm::make_range(symbol_iterator(File, Relocs.begin()),
symbol_iterator(File, Relocs.end()));
}
// Allow iteration over the associated child chunks for this section.
ArrayRef<SectionChunk *> children() const { return AssocChildren; }
// A pointer to a replacement for this chunk.
// Initially it points to "this" object. If this chunk is merged
// with another chunk by ICF, it points to that chunk,
// and this chunk is considered dead.
SectionChunk *Repl;
// The CRC of the contents as described in the COFF spec 4.5.5.
// Auxiliary Format 5: Section Definitions. Used for ICF.
uint32_t Checksum = 0;
const coff_section *Header;
// The file that this chunk was created from.
ObjFile *File;
// The COMDAT leader symbol if this is a COMDAT chunk.
DefinedRegular *Sym = nullptr;
ArrayRef<coff_relocation> Relocs;
private:
StringRef SectionName;
std::vector<SectionChunk *> AssocChildren;
// Used by the garbage collector.
bool Live;
// Used for ICF (Identical COMDAT Folding)
void replace(SectionChunk *Other);
uint32_t Class[2] = {0, 0};
};
// This class is used to implement an lld-specific feature (not implemented in
// MSVC) that minimizes the output size by finding string literals sharing tail
// parts and merging them.
//
// If string tail merging is enabled and a section is identified as containing a
// string literal, it is added to a MergeChunk with an appropriate alignment.
// The MergeChunk then tail merges the strings using the StringTableBuilder
// class and assigns RVAs and section offsets to each of the member chunks based
// on the offsets assigned by the StringTableBuilder.
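// For example, under tail merging the strings "hello" and "lo" can share
// storage: "lo" is a suffix of "hello", so it can be emitted at an offset
// into the bytes already written for "hello".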
class MergeChunk : public Chunk {
public:
MergeChunk(uint32_t Alignment);
static void addSection(SectionChunk *C);
void finalizeContents() override;
uint32_t getOutputCharacteristics() const override;
StringRef getSectionName() const override { return ".rdata"; }
size_t getSize() const override;
void writeTo(uint8_t *Buf) const override;
static std::map<uint32_t, MergeChunk *> Instances;
std::vector<SectionChunk *> Sections;
private:
llvm::StringTableBuilder Builder;
};
// A chunk for common symbols. Common chunks don't have actual data.
class CommonChunk : public Chunk {
public:
CommonChunk(const COFFSymbolRef Sym);
size_t getSize() const override { return Sym.getValue(); }
bool hasData() const override { return false; }
uint32_t getOutputCharacteristics() const override;
StringRef getSectionName() const override { return ".bss"; }
private:
const COFFSymbolRef Sym;
};
// A chunk for linker-created strings.
class StringChunk : public Chunk {
public:
explicit StringChunk(StringRef S) : Str(S) {}
size_t getSize() const override { return Str.size() + 1; }
void writeTo(uint8_t *Buf) const override;
private:
StringRef Str;
};
static const uint8_t ImportThunkX86[] = {
0xff, 0x25, 0x00, 0x00, 0x00, 0x00, // JMP *0x0
};
static const uint8_t ImportThunkARM[] = {
0x40, 0xf2, 0x00, 0x0c, // mov.w ip, #0
0xc0, 0xf2, 0x00, 0x0c, // mov.t ip, #0
0xdc, 0xf8, 0x00, 0xf0, // ldr.w pc, [ip]
};
static const uint8_t ImportThunkARM64[] = {
0x10, 0x00, 0x00, 0x90, // adrp x16, #0
0x10, 0x02, 0x40, 0xf9, // ldr x16, [x16]
0x00, 0x02, 0x1f, 0xd6, // br x16
};
// Windows-specific.
// A chunk for a DLL import jump table entry. In the final output, its
// contents will be a JMP instruction to some __imp_ symbol.
class ImportThunkChunkX64 : public Chunk {
public:
explicit ImportThunkChunkX64(Defined *S);
size_t getSize() const override { return sizeof(ImportThunkX86); }
void writeTo(uint8_t *Buf) const override;
private:
Defined *ImpSymbol;
};
class ImportThunkChunkX86 : public Chunk {
public:
explicit ImportThunkChunkX86(Defined *S) : ImpSymbol(S) {}
size_t getSize() const override { return sizeof(ImportThunkX86); }
void getBaserels(std::vector<Baserel> *Res) override;
void writeTo(uint8_t *Buf) const override;
private:
Defined *ImpSymbol;
};
class ImportThunkChunkARM : public Chunk {
public:
explicit ImportThunkChunkARM(Defined *S) : ImpSymbol(S) {}
size_t getSize() const override { return sizeof(ImportThunkARM); }
void getBaserels(std::vector<Baserel> *Res) override;
void writeTo(uint8_t *Buf) const override;
private:
Defined *ImpSymbol;
};
class ImportThunkChunkARM64 : public Chunk {
public:
explicit ImportThunkChunkARM64(Defined *S) : ImpSymbol(S) {}
size_t getSize() const override { return sizeof(ImportThunkARM64); }
void writeTo(uint8_t *Buf) const override;
private:
Defined *ImpSymbol;
};
// Windows-specific.
// See comments for DefinedLocalImport class.
class LocalImportChunk : public Chunk {
public:
- explicit LocalImportChunk(Defined *S) : Sym(S) {}
+ explicit LocalImportChunk(Defined *S) : Sym(S) {
+ Alignment = Config->is64() ? 8 : 4;
+ }
size_t getSize() const override;
void getBaserels(std::vector<Baserel> *Res) override;
void writeTo(uint8_t *Buf) const override;
private:
Defined *Sym;
};
// Duplicate RVAs are not allowed in RVA tables, so unique symbols by chunk and
// offset into the chunk. Order does not matter as the RVA table will be sorted
// later.
struct ChunkAndOffset {
Chunk *InputChunk;
uint32_t Offset;
struct DenseMapInfo {
static ChunkAndOffset getEmptyKey() {
return {llvm::DenseMapInfo<Chunk *>::getEmptyKey(), 0};
}
static ChunkAndOffset getTombstoneKey() {
return {llvm::DenseMapInfo<Chunk *>::getTombstoneKey(), 0};
}
static unsigned getHashValue(const ChunkAndOffset &CO) {
return llvm::DenseMapInfo<std::pair<Chunk *, uint32_t>>::getHashValue(
{CO.InputChunk, CO.Offset});
}
static bool isEqual(const ChunkAndOffset &LHS, const ChunkAndOffset &RHS) {
return LHS.InputChunk == RHS.InputChunk && LHS.Offset == RHS.Offset;
}
};
};
using SymbolRVASet = llvm::DenseSet<ChunkAndOffset>;
// Table which contains symbol RVAs. Used for /safeseh and /guard:cf.
class RVATableChunk : public Chunk {
public:
explicit RVATableChunk(SymbolRVASet S) : Syms(std::move(S)) {}
size_t getSize() const override { return Syms.size() * 4; }
void writeTo(uint8_t *Buf) const override;
private:
SymbolRVASet Syms;
};
// Windows-specific.
// This class represents a block in the .reloc section.
// See the PE/COFF spec 5.6 for details.
class BaserelChunk : public Chunk {
public:
BaserelChunk(uint32_t Page, Baserel *Begin, Baserel *End);
size_t getSize() const override { return Data.size(); }
void writeTo(uint8_t *Buf) const override;
private:
std::vector<uint8_t> Data;
};
class Baserel {
public:
Baserel(uint32_t V, uint8_t Ty) : RVA(V), Type(Ty) {}
explicit Baserel(uint32_t V) : Baserel(V, getDefaultType()) {}
uint8_t getDefaultType();
uint32_t RVA;
uint8_t Type;
};
void applyMOV32T(uint8_t *Off, uint32_t V);
void applyBranch24T(uint8_t *Off, int32_t V);
} // namespace coff
} // namespace lld
namespace llvm {
template <>
struct DenseMapInfo<lld::coff::ChunkAndOffset>
: lld::coff::ChunkAndOffset::DenseMapInfo {};
}
#endif
Index: projects/clang700-import/contrib/llvm/tools/lld/ELF/ScriptParser.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/lld/ELF/ScriptParser.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/lld/ELF/ScriptParser.cpp (revision 340125)
@@ -1,1472 +1,1480 @@
//===- ScriptParser.cpp ---------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a recursive-descent parser for linker scripts.
// Parsed results are stored to Config and Script global objects.
//
//===----------------------------------------------------------------------===//
#include "ScriptParser.h"
#include "Config.h"
#include "Driver.h"
#include "InputSection.h"
#include "LinkerScript.h"
#include "OutputSections.h"
#include "ScriptLexer.h"
#include "Symbols.h"
#include "Target.h"
#include "lld/Common/Memory.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include <cassert>
#include <limits>
#include <vector>
using namespace llvm;
using namespace llvm::ELF;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::elf;
static bool isUnderSysroot(StringRef Path);
namespace {
class ScriptParser final : ScriptLexer {
public:
ScriptParser(MemoryBufferRef MB)
: ScriptLexer(MB),
IsUnderSysroot(isUnderSysroot(MB.getBufferIdentifier())) {}
void readLinkerScript();
void readVersionScript();
void readDynamicList();
void readDefsym(StringRef Name);
private:
void addFile(StringRef Path);
void readAsNeeded();
void readEntry();
void readExtern();
void readGroup();
void readInclude();
void readInput();
void readMemory();
void readOutput();
void readOutputArch();
void readOutputFormat();
void readPhdrs();
void readRegionAlias();
void readSearchDir();
void readSections();
void readVersion();
void readVersionScriptCommand();
SymbolAssignment *readSymbolAssignment(StringRef Name);
ByteCommand *readByteCommand(StringRef Tok);
uint32_t readFill();
uint32_t parseFill(StringRef Tok);
void readSectionAddressType(OutputSection *Cmd);
OutputSection *readOverlaySectionDescription();
OutputSection *readOutputSectionDescription(StringRef OutSec);
std::vector<BaseCommand *> readOverlay();
std::vector<StringRef> readOutputSectionPhdrs();
InputSectionDescription *readInputSectionDescription(StringRef Tok);
StringMatcher readFilePatterns();
std::vector<SectionPattern> readInputSectionsList();
InputSectionDescription *readInputSectionRules(StringRef FilePattern);
unsigned readPhdrType();
SortSectionPolicy readSortKind();
SymbolAssignment *readProvideHidden(bool Provide, bool Hidden);
SymbolAssignment *readAssignment(StringRef Tok);
void readSort();
Expr readAssert();
Expr readConstant();
Expr getPageSize();
uint64_t readMemoryAssignment(StringRef, StringRef, StringRef);
std::pair<uint32_t, uint32_t> readMemoryAttributes();
Expr combine(StringRef Op, Expr L, Expr R);
Expr readExpr();
Expr readExpr1(Expr Lhs, int MinPrec);
StringRef readParenLiteral();
Expr readPrimary();
Expr readTernary(Expr Cond);
Expr readParenExpr();
// For parsing version script.
std::vector<SymbolVersion> readVersionExtern();
void readAnonymousDeclaration();
void readVersionDeclaration(StringRef VerStr);
std::pair<std::vector<SymbolVersion>, std::vector<SymbolVersion>>
readSymbols();
// True if a script being read is in a subdirectory specified by -sysroot.
bool IsUnderSysroot;
// A set to detect an INCLUDE() cycle.
StringSet<> Seen;
};
} // namespace
static StringRef unquote(StringRef S) {
if (S.startswith("\""))
return S.substr(1, S.size() - 2);
return S;
}
static bool isUnderSysroot(StringRef Path) {
if (Config->Sysroot == "")
return false;
for (; !Path.empty(); Path = sys::path::parent_path(Path))
if (sys::fs::equivalent(Config->Sysroot, Path))
return true;
return false;
}
// Some operations support only one non-absolute value. Move the
// absolute one to the right-hand side for convenience.
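// For example (illustrative), when evaluating '0x10 + foo', where foo is
// defined in an output section, the operands are swapped so that the
// absolute value 0x10 ends up on the right-hand side.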
static void moveAbsRight(ExprValue &A, ExprValue &B) {
if (A.Sec == nullptr || (A.ForceAbsolute && !B.isAbsolute()))
std::swap(A, B);
if (!B.isAbsolute())
error(A.Loc + ": at least one side of the expression must be absolute");
}
static ExprValue add(ExprValue A, ExprValue B) {
moveAbsRight(A, B);
return {A.Sec, A.ForceAbsolute, A.getSectionOffset() + B.getValue(), A.Loc};
}
static ExprValue sub(ExprValue A, ExprValue B) {
// The distance between two symbols in sections is absolute.
if (!A.isAbsolute() && !B.isAbsolute())
return A.getValue() - B.getValue();
return {A.Sec, false, A.getSectionOffset() - B.getValue(), A.Loc};
}
static ExprValue bitAnd(ExprValue A, ExprValue B) {
moveAbsRight(A, B);
return {A.Sec, A.ForceAbsolute,
(A.getValue() & B.getValue()) - A.getSecAddr(), A.Loc};
}
static ExprValue bitOr(ExprValue A, ExprValue B) {
moveAbsRight(A, B);
return {A.Sec, A.ForceAbsolute,
(A.getValue() | B.getValue()) - A.getSecAddr(), A.Loc};
}
void ScriptParser::readDynamicList() {
Config->HasDynamicList = true;
expect("{");
std::vector<SymbolVersion> Locals;
std::vector<SymbolVersion> Globals;
std::tie(Locals, Globals) = readSymbols();
expect(";");
if (!atEOF()) {
setError("EOF expected, but got " + next());
return;
}
if (!Locals.empty()) {
setError("\"local:\" scope not supported in --dynamic-list");
return;
}
for (SymbolVersion V : Globals)
Config->DynamicList.push_back(V);
}
void ScriptParser::readVersionScript() {
readVersionScriptCommand();
if (!atEOF())
setError("EOF expected, but got " + next());
}
void ScriptParser::readVersionScriptCommand() {
if (consume("{")) {
readAnonymousDeclaration();
return;
}
while (!atEOF() && !errorCount() && peek() != "}") {
StringRef VerStr = next();
if (VerStr == "{") {
setError("anonymous version definition is used in "
"combination with other version definitions");
return;
}
expect("{");
readVersionDeclaration(VerStr);
}
}
void ScriptParser::readVersion() {
expect("{");
readVersionScriptCommand();
expect("}");
}
void ScriptParser::readLinkerScript() {
while (!atEOF()) {
StringRef Tok = next();
if (Tok == ";")
continue;
if (Tok == "ENTRY") {
readEntry();
} else if (Tok == "EXTERN") {
readExtern();
} else if (Tok == "GROUP") {
readGroup();
} else if (Tok == "INCLUDE") {
readInclude();
} else if (Tok == "INPUT") {
readInput();
} else if (Tok == "MEMORY") {
readMemory();
} else if (Tok == "OUTPUT") {
readOutput();
} else if (Tok == "OUTPUT_ARCH") {
readOutputArch();
} else if (Tok == "OUTPUT_FORMAT") {
readOutputFormat();
} else if (Tok == "PHDRS") {
readPhdrs();
} else if (Tok == "REGION_ALIAS") {
readRegionAlias();
} else if (Tok == "SEARCH_DIR") {
readSearchDir();
} else if (Tok == "SECTIONS") {
readSections();
} else if (Tok == "VERSION") {
readVersion();
} else if (SymbolAssignment *Cmd = readAssignment(Tok)) {
Script->SectionCommands.push_back(Cmd);
} else {
setError("unknown directive: " + Tok);
}
}
}
void ScriptParser::readDefsym(StringRef Name) {
Expr E = readExpr();
if (!atEOF())
setError("EOF expected, but got " + next());
SymbolAssignment *Cmd = make<SymbolAssignment>(Name, E, getCurrentLocation());
Script->SectionCommands.push_back(Cmd);
}
void ScriptParser::addFile(StringRef S) {
if (IsUnderSysroot && S.startswith("/")) {
SmallString<128> PathData;
StringRef Path = (Config->Sysroot + S).toStringRef(PathData);
if (sys::fs::exists(Path)) {
Driver->addFile(Saver.save(Path), /*WithLOption=*/false);
return;
}
}
if (S.startswith("/")) {
Driver->addFile(S, /*WithLOption=*/false);
} else if (S.startswith("=")) {
if (Config->Sysroot.empty())
Driver->addFile(S.substr(1), /*WithLOption=*/false);
else
Driver->addFile(Saver.save(Config->Sysroot + "/" + S.substr(1)),
/*WithLOption=*/false);
} else if (S.startswith("-l")) {
Driver->addLibrary(S.substr(2));
} else if (sys::fs::exists(S)) {
Driver->addFile(S, /*WithLOption=*/false);
} else {
if (Optional<std::string> Path = findFromSearchPaths(S))
Driver->addFile(Saver.save(*Path), /*WithLOption=*/true);
else
setError("unable to find " + S);
}
}
void ScriptParser::readAsNeeded() {
expect("(");
bool Orig = Config->AsNeeded;
Config->AsNeeded = true;
while (!errorCount() && !consume(")"))
addFile(unquote(next()));
Config->AsNeeded = Orig;
}
void ScriptParser::readEntry() {
// -e <symbol> takes precedence over ENTRY(<symbol>).
expect("(");
StringRef Tok = next();
if (Config->Entry.empty())
Config->Entry = Tok;
expect(")");
}
void ScriptParser::readExtern() {
expect("(");
while (!errorCount() && !consume(")"))
Config->Undefined.push_back(next());
}
void ScriptParser::readGroup() {
bool Orig = InputFile::IsInGroup;
InputFile::IsInGroup = true;
readInput();
InputFile::IsInGroup = Orig;
if (!Orig)
++InputFile::NextGroupId;
}
void ScriptParser::readInclude() {
StringRef Tok = unquote(next());
if (!Seen.insert(Tok).second) {
setError("there is a cycle in linker script INCLUDEs");
return;
}
if (Optional<std::string> Path = searchScript(Tok)) {
if (Optional<MemoryBufferRef> MB = readFile(*Path))
tokenize(*MB);
return;
}
setError("cannot find linker script " + Tok);
}
void ScriptParser::readInput() {
expect("(");
while (!errorCount() && !consume(")")) {
if (consume("AS_NEEDED"))
readAsNeeded();
else
addFile(unquote(next()));
}
}
void ScriptParser::readOutput() {
// -o <file> takes precedence over OUTPUT(<file>).
expect("(");
StringRef Tok = next();
if (Config->OutputFile.empty())
Config->OutputFile = unquote(Tok);
expect(")");
}
void ScriptParser::readOutputArch() {
// OUTPUT_ARCH is ignored for now.
expect("(");
while (!errorCount() && !consume(")"))
skip();
}
void ScriptParser::readOutputFormat() {
// Error checking only for now.
expect("(");
skip();
if (consume(")"))
return;
expect(",");
skip();
expect(",");
skip();
expect(")");
}
void ScriptParser::readPhdrs() {
expect("{");
while (!errorCount() && !consume("}")) {
PhdrsCommand Cmd;
Cmd.Name = next();
Cmd.Type = readPhdrType();
while (!errorCount() && !consume(";")) {
if (consume("FILEHDR"))
Cmd.HasFilehdr = true;
else if (consume("PHDRS"))
Cmd.HasPhdrs = true;
else if (consume("AT"))
Cmd.LMAExpr = readParenExpr();
else if (consume("FLAGS"))
Cmd.Flags = readParenExpr()().getValue();
else
setError("unexpected header attribute: " + next());
}
Script->PhdrsCommands.push_back(Cmd);
}
}
void ScriptParser::readRegionAlias() {
expect("(");
StringRef Alias = unquote(next());
expect(",");
StringRef Name = next();
expect(")");
if (Script->MemoryRegions.count(Alias))
setError("redefinition of memory region '" + Alias + "'");
if (!Script->MemoryRegions.count(Name))
setError("memory region '" + Name + "' is not defined");
Script->MemoryRegions.insert({Alias, Script->MemoryRegions[Name]});
}
void ScriptParser::readSearchDir() {
expect("(");
StringRef Tok = next();
if (!Config->Nostdlib)
Config->SearchPaths.push_back(unquote(Tok));
expect(")");
}
// This reads an overlay description. Overlays are used to describe output
// sections that use the same virtual memory range and would normally trigger
// the linker's section-overlap sanity checks.
// https://sourceware.org/binutils/docs/ld/Overlay-Description.html#Overlay-Description
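// For example (matching the syntax parsed below):
//
//   OVERLAY 0x1000 : AT (0x4000) {
//     .text0 { *(.text0) }
//     .text1 { *(.text1) }
//   }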
std::vector<BaseCommand *> ScriptParser::readOverlay() {
// VA and LMA expressions are optional, though for simplicity of
// implementation we assume they are not. That is what OVERLAY was designed
// for first of all: to allow sections with overlapping VAs at different LMAs.
Expr AddrExpr = readExpr();
expect(":");
expect("AT");
Expr LMAExpr = readParenExpr();
expect("{");
std::vector<BaseCommand *> V;
OutputSection *Prev = nullptr;
while (!errorCount() && !consume("}")) {
// VA is the same for all sections. The LMAs are consecutive in memory
// starting from the base load address specified.
OutputSection *OS = readOverlaySectionDescription();
OS->AddrExpr = AddrExpr;
if (Prev)
OS->LMAExpr = [=] { return Prev->getLMA() + Prev->Size; };
else
OS->LMAExpr = LMAExpr;
V.push_back(OS);
Prev = OS;
}
// According to the specification, at the end of the overlay, the location
// counter should be equal to the overlay base address plus size of the
// largest section seen in the overlay.
// Here we want to create the Dot assignment command to achieve that.
Expr MoveDot = [=] {
uint64_t Max = 0;
for (BaseCommand *Cmd : V)
Max = std::max(Max, cast<OutputSection>(Cmd)->Size);
return AddrExpr().getValue() + Max;
};
V.push_back(make<SymbolAssignment>(".", MoveDot, getCurrentLocation()));
return V;
}
void ScriptParser::readSections() {
Script->HasSectionsCommand = true;
// -no-rosegment is used to avoid placing read-only non-executable sections in
// their own segment. We do the same if a SECTIONS command is present in the
// linker script. See the comment for computeFlags().
Config->SingleRoRx = true;
expect("{");
std::vector<BaseCommand *> V;
while (!errorCount() && !consume("}")) {
StringRef Tok = next();
if (Tok == "OVERLAY") {
for (BaseCommand *Cmd : readOverlay())
V.push_back(Cmd);
continue;
+ } else if (Tok == "INCLUDE") {
+ readInclude();
+ continue;
}
if (BaseCommand *Cmd = readAssignment(Tok))
V.push_back(Cmd);
else
V.push_back(readOutputSectionDescription(Tok));
}
if (!atEOF() && consume("INSERT")) {
std::vector<BaseCommand *> *Dest = nullptr;
if (consume("AFTER"))
Dest = &Script->InsertAfterCommands[next()];
else if (consume("BEFORE"))
Dest = &Script->InsertBeforeCommands[next()];
else
setError("expected AFTER/BEFORE, but got '" + next() + "'");
if (Dest)
Dest->insert(Dest->end(), V.begin(), V.end());
return;
}
Script->SectionCommands.insert(Script->SectionCommands.end(), V.begin(),
V.end());
}
static int precedence(StringRef Op) {
return StringSwitch<int>(Op)
.Cases("*", "/", "%", 8)
.Cases("+", "-", 7)
.Cases("<<", ">>", 6)
.Cases("<", "<=", ">", ">=", "==", "!=", 5)
.Case("&", 4)
.Case("|", 3)
.Case("&&", 2)
.Case("||", 1)
.Default(-1);
}
StringMatcher ScriptParser::readFilePatterns() {
std::vector<StringRef> V;
while (!errorCount() && !consume(")"))
V.push_back(next());
return StringMatcher(V);
}
SortSectionPolicy ScriptParser::readSortKind() {
if (consume("SORT") || consume("SORT_BY_NAME"))
return SortSectionPolicy::Name;
if (consume("SORT_BY_ALIGNMENT"))
return SortSectionPolicy::Alignment;
if (consume("SORT_BY_INIT_PRIORITY"))
return SortSectionPolicy::Priority;
if (consume("SORT_NONE"))
return SortSectionPolicy::None;
return SortSectionPolicy::Default;
}
// Reads SECTIONS command contents in the following form:
//
// <contents> ::= <elem>*
// <elem> ::= <exclude>? <glob-pattern>
// <exclude> ::= "EXCLUDE_FILE" "(" <glob-pattern>+ ")"
//
// For example,
//
// *(.foo EXCLUDE_FILE (a.o) .bar EXCLUDE_FILE (b.o) .baz)
//
// is parsed as ".foo", ".bar" with "a.o", and ".baz" with "b.o".
// The semantics are: section .foo in any file, section .bar in any file
// but a.o, and section .baz in any file but b.o.
std::vector<SectionPattern> ScriptParser::readInputSectionsList() {
std::vector<SectionPattern> Ret;
while (!errorCount() && peek() != ")") {
StringMatcher ExcludeFilePat;
if (consume("EXCLUDE_FILE")) {
expect("(");
ExcludeFilePat = readFilePatterns();
}
std::vector<StringRef> V;
while (!errorCount() && peek() != ")" && peek() != "EXCLUDE_FILE")
V.push_back(next());
if (!V.empty())
Ret.push_back({std::move(ExcludeFilePat), StringMatcher(V)});
else
setError("section pattern is expected");
}
return Ret;
}
// Reads contents of "SECTIONS" directive. That directive contains a
// list of glob patterns for input sections. The grammar is as follows.
//
// <patterns> ::= <section-list>
// | <sort> "(" <section-list> ")"
// | <sort> "(" <sort> "(" <section-list> ")" ")"
//
// <sort> ::= "SORT" | "SORT_BY_NAME" | "SORT_BY_ALIGNMENT"
// | "SORT_BY_INIT_PRIORITY" | "SORT_NONE"
//
// <section-list> is parsed by readInputSectionsList().
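//
// For example, following the GNU ld semantics, a pattern such as
// "*(SORT_BY_NAME(SORT_BY_ALIGNMENT(.text.*)))" sorts the matched
// sections by name first and breaks ties by alignment.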
InputSectionDescription *
ScriptParser::readInputSectionRules(StringRef FilePattern) {
auto *Cmd = make<InputSectionDescription>(FilePattern);
expect("(");
while (!errorCount() && !consume(")")) {
SortSectionPolicy Outer = readSortKind();
SortSectionPolicy Inner = SortSectionPolicy::Default;
std::vector<SectionPattern> V;
if (Outer != SortSectionPolicy::Default) {
expect("(");
Inner = readSortKind();
if (Inner != SortSectionPolicy::Default) {
expect("(");
V = readInputSectionsList();
expect(")");
} else {
V = readInputSectionsList();
}
expect(")");
} else {
V = readInputSectionsList();
}
for (SectionPattern &Pat : V) {
Pat.SortInner = Inner;
Pat.SortOuter = Outer;
}
std::move(V.begin(), V.end(), std::back_inserter(Cmd->SectionPatterns));
}
return Cmd;
}
InputSectionDescription *
ScriptParser::readInputSectionDescription(StringRef Tok) {
// An input section wildcard can be surrounded by KEEP.
// https://sourceware.org/binutils/docs/ld/Input-Section-Keep.html#Input-Section-Keep
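// For example, "KEEP(*(.init .fini))" marks the matched sections so that
// --gc-sections does not discard them.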
if (Tok == "KEEP") {
expect("(");
StringRef FilePattern = next();
InputSectionDescription *Cmd = readInputSectionRules(FilePattern);
expect(")");
Script->KeptSections.push_back(Cmd);
return Cmd;
}
return readInputSectionRules(Tok);
}
void ScriptParser::readSort() {
expect("(");
expect("CONSTRUCTORS");
expect(")");
}
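// Reads an ASSERT(expr, message) command. For example (a made-up script
// fragment), ASSERT(SIZEOF(.bss) <= 0x1000, "bss too large") reports an
// error with the given message when the condition does not hold. The
// returned expression evaluates to Dot, so ASSERT can also appear where
// an expression is expected.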
Expr ScriptParser::readAssert() {
expect("(");
Expr E = readExpr();
expect(",");
StringRef Msg = unquote(next());
expect(")");
return [=] {
if (!E().getValue())
error(Msg);
return Script->getDot();
};
}
// Reads a FILL(expr) command. We handle the FILL command as an
// alias for =fillexp section attribute, which is different from
// what GNU linkers do.
// https://sourceware.org/binutils/docs/ld/Output-Section-Data.html
uint32_t ScriptParser::readFill() {
expect("(");
uint32_t V = parseFill(next());
expect(")");
return V;
}
// Reads an expression and/or the special directive for an output
// section definition. Directive is one of following: "(NOLOAD)",
// "(COPY)", "(INFO)" or "(OVERLAY)".
//
// An output section name can be followed by an address expression
// and/or directive. This grammar is not LL(1) because "(" can be
// interpreted as either the beginning of some expression or beginning
// of directive.
//
// https://sourceware.org/binutils/docs/ld/Output-Section-Address.html
// https://sourceware.org/binutils/docs/ld/Output-Section-Type.html
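//
// For illustration, all of the following (made-up) section headers are
// accepted:
//
//   .foo 0x1000 : { *(.foo) }            /* address expression */
//   .bar (NOLOAD) : { *(.bar) }          /* directive */
//   .baz 0x2000 (NOLOAD) : { *(.baz) }   /* both */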
void ScriptParser::readSectionAddressType(OutputSection *Cmd) {
if (consume("(")) {
if (consume("NOLOAD")) {
expect(")");
Cmd->Noload = true;
return;
}
if (consume("COPY") || consume("INFO") || consume("OVERLAY")) {
expect(")");
Cmd->NonAlloc = true;
return;
}
Cmd->AddrExpr = readExpr();
expect(")");
} else {
Cmd->AddrExpr = readExpr();
}
if (consume("(")) {
expect("NOLOAD");
expect(")");
Cmd->Noload = true;
}
}
static Expr checkAlignment(Expr E, std::string &Loc) {
return [=] {
uint64_t Alignment = std::max((uint64_t)1, E().getValue());
if (!isPowerOf2_64(Alignment)) {
error(Loc + ": alignment must be power of 2");
return (uint64_t)1; // Return a dummy value.
}
return Alignment;
};
}
OutputSection *ScriptParser::readOverlaySectionDescription() {
OutputSection *Cmd =
Script->createOutputSection(next(), getCurrentLocation());
Cmd->InOverlay = true;
expect("{");
while (!errorCount() && !consume("}"))
Cmd->SectionCommands.push_back(readInputSectionRules(next()));
Cmd->Phdrs = readOutputSectionPhdrs();
return Cmd;
}
OutputSection *ScriptParser::readOutputSectionDescription(StringRef OutSec) {
OutputSection *Cmd =
Script->createOutputSection(OutSec, getCurrentLocation());
size_t SymbolsReferenced = Script->ReferencedSymbols.size();
if (peek() != ":")
readSectionAddressType(Cmd);
expect(":");
std::string Location = getCurrentLocation();
if (consume("AT"))
Cmd->LMAExpr = readParenExpr();
if (consume("ALIGN"))
Cmd->AlignExpr = checkAlignment(readParenExpr(), Location);
if (consume("SUBALIGN"))
Cmd->SubalignExpr = checkAlignment(readParenExpr(), Location);
// Parse constraints.
if (consume("ONLY_IF_RO"))
Cmd->Constraint = ConstraintKind::ReadOnly;
if (consume("ONLY_IF_RW"))
Cmd->Constraint = ConstraintKind::ReadWrite;
expect("{");
while (!errorCount() && !consume("}")) {
StringRef Tok = next();
if (Tok == ";") {
// Empty commands are allowed. Do nothing here.
} else if (SymbolAssignment *Assign = readAssignment(Tok)) {
Cmd->SectionCommands.push_back(Assign);
} else if (ByteCommand *Data = readByteCommand(Tok)) {
Cmd->SectionCommands.push_back(Data);
} else if (Tok == "CONSTRUCTORS") {
// CONSTRUCTORS is a keyword to make the linker recognize C++ ctors/dtors
// by name. This is for very old file formats such as ECOFF/XCOFF.
// For ELF, we should ignore it.
} else if (Tok == "FILL") {
Cmd->Filler = readFill();
} else if (Tok == "SORT") {
readSort();
+ } else if (Tok == "INCLUDE") {
+ readInclude();
} else if (peek() == "(") {
Cmd->SectionCommands.push_back(readInputSectionDescription(Tok));
} else {
setError("unknown command " + Tok);
}
}
if (consume(">"))
Cmd->MemoryRegionName = next();
if (consume("AT")) {
expect(">");
Cmd->LMARegionName = next();
}
if (Cmd->LMAExpr && !Cmd->LMARegionName.empty())
error("section can't have both LMA and a load region");
Cmd->Phdrs = readOutputSectionPhdrs();
if (consume("="))
Cmd->Filler = parseFill(next());
else if (peek().startswith("="))
Cmd->Filler = parseFill(next().drop_front());
// Consume optional comma following output section command.
consume(",");
if (Script->ReferencedSymbols.size() > SymbolsReferenced)
Cmd->ExpressionsUseSymbols = true;
return Cmd;
}
// Parses a given string as an octal/decimal/hexadecimal number and
// returns it as a big-endian number. Used for `=<fillexp>`.
// https://sourceware.org/binutils/docs/ld/Output-Section-Fill.html
//
// When reading a hexstring, ld.bfd handles it as a blob of arbitrary
// size, while ld.gold always handles it as a 32-bit big-endian number.
// We are compatible with ld.gold because it's easier to implement.
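//
// For example, "=0x1122" produces the filler bytes 00 00 11 22 (repeated):
// the value is zero-extended to 32 bits and stored big-endian, matching
// the ld.gold behavior described above.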
uint32_t ScriptParser::parseFill(StringRef Tok) {
uint32_t V = 0;
if (!to_integer(Tok, V))
setError("invalid filler expression: " + Tok);
uint32_t Buf;
write32be(&Buf, V);
return Buf;
}
SymbolAssignment *ScriptParser::readProvideHidden(bool Provide, bool Hidden) {
expect("(");
SymbolAssignment *Cmd = readSymbolAssignment(next());
Cmd->Provide = Provide;
Cmd->Hidden = Hidden;
expect(")");
return Cmd;
}
SymbolAssignment *ScriptParser::readAssignment(StringRef Tok) {
// Assert expression returns Dot, so this is equal to ".=."
if (Tok == "ASSERT")
return make<SymbolAssignment>(".", readAssert(), getCurrentLocation());
size_t OldPos = Pos;
SymbolAssignment *Cmd = nullptr;
if (peek() == "=" || peek() == "+=")
Cmd = readSymbolAssignment(Tok);
else if (Tok == "PROVIDE")
Cmd = readProvideHidden(true, false);
else if (Tok == "HIDDEN")
Cmd = readProvideHidden(false, true);
else if (Tok == "PROVIDE_HIDDEN")
Cmd = readProvideHidden(true, true);
if (Cmd) {
Cmd->CommandString =
Tok.str() + " " +
llvm::join(Tokens.begin() + OldPos, Tokens.begin() + Pos, " ");
expect(";");
}
return Cmd;
}
SymbolAssignment *ScriptParser::readSymbolAssignment(StringRef Name) {
StringRef Op = next();
assert(Op == "=" || Op == "+=");
Expr E = readExpr();
if (Op == "+=") {
std::string Loc = getCurrentLocation();
E = [=] { return add(Script->getSymbolValue(Name, Loc), E()); };
}
return make<SymbolAssignment>(Name, E, getCurrentLocation());
}
// This is an operator-precedence parser to parse a linker
// script expression.
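//
// For example, "1 + 2 * 3" evaluates to 7 rather than 9 because "*"
// (precedence 8) binds tighter than "+" (precedence 7); see precedence()
// above.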
Expr ScriptParser::readExpr() {
// Our lexer is context-aware. Set the in-expression bit so that
// it applies different tokenization rules.
bool Orig = InExpr;
InExpr = true;
Expr E = readExpr1(readPrimary(), 0);
InExpr = Orig;
return E;
}
Expr ScriptParser::combine(StringRef Op, Expr L, Expr R) {
if (Op == "+")
return [=] { return add(L(), R()); };
if (Op == "-")
return [=] { return sub(L(), R()); };
if (Op == "*")
return [=] { return L().getValue() * R().getValue(); };
if (Op == "/") {
std::string Loc = getCurrentLocation();
return [=]() -> uint64_t {
if (uint64_t RV = R().getValue())
return L().getValue() / RV;
error(Loc + ": division by zero");
return 0;
};
}
if (Op == "%") {
std::string Loc = getCurrentLocation();
return [=]() -> uint64_t {
if (uint64_t RV = R().getValue())
return L().getValue() % RV;
error(Loc + ": modulo by zero");
return 0;
};
}
if (Op == "<<")
return [=] { return L().getValue() << R().getValue(); };
if (Op == ">>")
return [=] { return L().getValue() >> R().getValue(); };
if (Op == "<")
return [=] { return L().getValue() < R().getValue(); };
if (Op == ">")
return [=] { return L().getValue() > R().getValue(); };
if (Op == ">=")
return [=] { return L().getValue() >= R().getValue(); };
if (Op == "<=")
return [=] { return L().getValue() <= R().getValue(); };
if (Op == "==")
return [=] { return L().getValue() == R().getValue(); };
if (Op == "!=")
return [=] { return L().getValue() != R().getValue(); };
if (Op == "||")
return [=] { return L().getValue() || R().getValue(); };
if (Op == "&&")
return [=] { return L().getValue() && R().getValue(); };
if (Op == "&")
return [=] { return bitAnd(L(), R()); };
if (Op == "|")
return [=] { return bitOr(L(), R()); };
llvm_unreachable("invalid operator");
}
// This is a part of the operator-precedence parser. This function
// assumes that the remaining token stream starts with an operator.
Expr ScriptParser::readExpr1(Expr Lhs, int MinPrec) {
while (!atEOF() && !errorCount()) {
// Read an operator and an expression.
if (consume("?"))
return readTernary(Lhs);
StringRef Op1 = peek();
if (precedence(Op1) < MinPrec)
break;
skip();
Expr Rhs = readPrimary();
// Evaluate the remaining part of the expression first if the
// next operator has greater precedence than the previous one.
// For example, if we have read "+" and "3", and if the next
// operator is "*", then we'll evaluate 3 * ... part first.
while (!atEOF()) {
StringRef Op2 = peek();
if (precedence(Op2) <= precedence(Op1))
break;
Rhs = readExpr1(Rhs, precedence(Op2));
}
Lhs = combine(Op1, Lhs, Rhs);
}
return Lhs;
}
Expr ScriptParser::getPageSize() {
std::string Location = getCurrentLocation();
return [=]() -> uint64_t {
if (Target)
return Target->PageSize;
error(Location + ": unable to calculate page size");
return 4096; // Return a dummy value.
};
}
Expr ScriptParser::readConstant() {
StringRef S = readParenLiteral();
if (S == "COMMONPAGESIZE")
return getPageSize();
if (S == "MAXPAGESIZE")
return [] { return Config->MaxPageSize; };
setError("unknown constant: " + S);
return [] { return 0; };
}
// Parses Tok as an integer. It recognizes hexadecimal (prefixed with
// "0x" or suffixed with "H") and decimal numbers. Decimal numbers may
// have "K" (Ki) or "M" (Mi) suffixes.
static Optional<uint64_t> parseInt(StringRef Tok) {
// Hexadecimal
uint64_t Val;
if (Tok.startswith_lower("0x")) {
if (!to_integer(Tok.substr(2), Val, 16))
return None;
return Val;
}
if (Tok.endswith_lower("H")) {
if (!to_integer(Tok.drop_back(), Val, 16))
return None;
return Val;
}
// Decimal
if (Tok.endswith_lower("K")) {
if (!to_integer(Tok.drop_back(), Val, 10))
return None;
return Val * 1024;
}
if (Tok.endswith_lower("M")) {
if (!to_integer(Tok.drop_back(), Val, 10))
return None;
return Val * 1024 * 1024;
}
if (!to_integer(Tok, Val, 10))
return None;
return Val;
}
ByteCommand *ScriptParser::readByteCommand(StringRef Tok) {
int Size = StringSwitch<int>(Tok)
.Case("BYTE", 1)
.Case("SHORT", 2)
.Case("LONG", 4)
.Case("QUAD", 8)
.Default(-1);
if (Size == -1)
return nullptr;
size_t OldPos = Pos;
Expr E = readParenExpr();
std::string CommandString =
Tok.str() + " " +
llvm::join(Tokens.begin() + OldPos, Tokens.begin() + Pos, " ");
return make<ByteCommand>(E, Size, CommandString);
}
StringRef ScriptParser::readParenLiteral() {
expect("(");
bool Orig = InExpr;
InExpr = false;
StringRef Tok = next();
InExpr = Orig;
expect(")");
return Tok;
}
static void checkIfExists(OutputSection *Cmd, StringRef Location) {
if (Cmd->Location.empty() && Script->ErrorOnMissingSection)
error(Location + ": undefined section " + Cmd->Name);
}
Expr ScriptParser::readPrimary() {
if (peek() == "(")
return readParenExpr();
if (consume("~")) {
Expr E = readPrimary();
return [=] { return ~E().getValue(); };
}
if (consume("!")) {
Expr E = readPrimary();
return [=] { return !E().getValue(); };
}
if (consume("-")) {
Expr E = readPrimary();
return [=] { return -E().getValue(); };
}
StringRef Tok = next();
std::string Location = getCurrentLocation();
// Built-in functions are parsed here.
// https://sourceware.org/binutils/docs/ld/Builtin-Functions.html.
if (Tok == "ABSOLUTE") {
Expr Inner = readParenExpr();
return [=] {
ExprValue I = Inner();
I.ForceAbsolute = true;
return I;
};
}
if (Tok == "ADDR") {
StringRef Name = readParenLiteral();
OutputSection *Sec = Script->getOrCreateOutputSection(Name);
return [=]() -> ExprValue {
checkIfExists(Sec, Location);
return {Sec, false, 0, Location};
};
}
if (Tok == "ALIGN") {
expect("(");
Expr E = readExpr();
if (consume(")")) {
E = checkAlignment(E, Location);
return [=] { return alignTo(Script->getDot(), E().getValue()); };
}
expect(",");
Expr E2 = checkAlignment(readExpr(), Location);
expect(")");
return [=] {
ExprValue V = E();
V.Alignment = E2().getValue();
return V;
};
}
if (Tok == "ALIGNOF") {
StringRef Name = readParenLiteral();
OutputSection *Cmd = Script->getOrCreateOutputSection(Name);
return [=] {
checkIfExists(Cmd, Location);
return Cmd->Alignment;
};
}
if (Tok == "ASSERT")
return readAssert();
if (Tok == "CONSTANT")
return readConstant();
if (Tok == "DATA_SEGMENT_ALIGN") {
expect("(");
Expr E = readExpr();
expect(",");
readExpr();
expect(")");
return [=] {
return alignTo(Script->getDot(), std::max((uint64_t)1, E().getValue()));
};
}
if (Tok == "DATA_SEGMENT_END") {
expect("(");
expect(".");
expect(")");
return [] { return Script->getDot(); };
}
if (Tok == "DATA_SEGMENT_RELRO_END") {
// GNU linkers implement more complicated logic to handle
// DATA_SEGMENT_RELRO_END. We instead ignore the arguments and
// just align to the next page boundary for simplicity.
expect("(");
readExpr();
expect(",");
readExpr();
expect(")");
Expr E = getPageSize();
return [=] { return alignTo(Script->getDot(), E().getValue()); };
}
if (Tok == "DEFINED") {
StringRef Name = readParenLiteral();
return [=] { return Symtab->find(Name) ? 1 : 0; };
}
if (Tok == "LENGTH") {
StringRef Name = readParenLiteral();
if (Script->MemoryRegions.count(Name) == 0) {
setError("memory region not defined: " + Name);
return [] { return 0; };
}
return [=] { return Script->MemoryRegions[Name]->Length; };
}
if (Tok == "LOADADDR") {
StringRef Name = readParenLiteral();
OutputSection *Cmd = Script->getOrCreateOutputSection(Name);
return [=] {
checkIfExists(Cmd, Location);
return Cmd->getLMA();
};
}
if (Tok == "MAX" || Tok == "MIN") {
expect("(");
Expr A = readExpr();
expect(",");
Expr B = readExpr();
expect(")");
if (Tok == "MIN")
return [=] { return std::min(A().getValue(), B().getValue()); };
return [=] { return std::max(A().getValue(), B().getValue()); };
}
if (Tok == "ORIGIN") {
StringRef Name = readParenLiteral();
if (Script->MemoryRegions.count(Name) == 0) {
setError("memory region not defined: " + Name);
return [] { return 0; };
}
return [=] { return Script->MemoryRegions[Name]->Origin; };
}
if (Tok == "SEGMENT_START") {
expect("(");
skip();
expect(",");
Expr E = readExpr();
expect(")");
return [=] { return E(); };
}
if (Tok == "SIZEOF") {
StringRef Name = readParenLiteral();
OutputSection *Cmd = Script->getOrCreateOutputSection(Name);
// A linker script does not create an output section if its content is
// empty. We want to allow SIZEOF(.foo) where .foo is a section that
// happens to be empty.
return [=] { return Cmd->Size; };
}
if (Tok == "SIZEOF_HEADERS")
return [=] { return elf::getHeaderSize(); };
// Tok is the dot.
if (Tok == ".")
return [=] { return Script->getSymbolValue(Tok, Location); };
// Tok is a literal number.
if (Optional<uint64_t> Val = parseInt(Tok))
return [=] { return *Val; };
// Tok is a symbol name.
if (!isValidCIdentifier(Tok))
setError("malformed number: " + Tok);
Script->ReferencedSymbols.push_back(Tok);
return [=] { return Script->getSymbolValue(Tok, Location); };
}
Expr ScriptParser::readTernary(Expr Cond) {
Expr L = readExpr();
expect(":");
Expr R = readExpr();
return [=] { return Cond().getValue() ? L() : R(); };
}
Expr ScriptParser::readParenExpr() {
expect("(");
Expr E = readExpr();
expect(")");
return E;
}
std::vector<StringRef> ScriptParser::readOutputSectionPhdrs() {
std::vector<StringRef> Phdrs;
while (!errorCount() && peek().startswith(":")) {
StringRef Tok = next();
Phdrs.push_back((Tok.size() == 1) ? next() : Tok.substr(1));
}
return Phdrs;
}
// Read a program header type name. The next token must be the
// name of a program header type or an integer constant (e.g. "0x3").
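//
// For example, in a (made-up) "PHDRS { text PT_LOAD; }" entry, this
// function parses the "PT_LOAD" token; the numeric form "0x1" would be
// accepted as well and denotes the same type.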
unsigned ScriptParser::readPhdrType() {
StringRef Tok = next();
if (Optional<uint64_t> Val = parseInt(Tok))
return *Val;
unsigned Ret = StringSwitch<unsigned>(Tok)
.Case("PT_NULL", PT_NULL)
.Case("PT_LOAD", PT_LOAD)
.Case("PT_DYNAMIC", PT_DYNAMIC)
.Case("PT_INTERP", PT_INTERP)
.Case("PT_NOTE", PT_NOTE)
.Case("PT_SHLIB", PT_SHLIB)
.Case("PT_PHDR", PT_PHDR)
.Case("PT_TLS", PT_TLS)
.Case("PT_GNU_EH_FRAME", PT_GNU_EH_FRAME)
.Case("PT_GNU_STACK", PT_GNU_STACK)
.Case("PT_GNU_RELRO", PT_GNU_RELRO)
.Case("PT_OPENBSD_RANDOMIZE", PT_OPENBSD_RANDOMIZE)
.Case("PT_OPENBSD_WXNEEDED", PT_OPENBSD_WXNEEDED)
.Case("PT_OPENBSD_BOOTDATA", PT_OPENBSD_BOOTDATA)
.Default(-1);
if (Ret == (unsigned)-1) {
setError("invalid program header type: " + Tok);
return PT_NULL;
}
return Ret;
}
// Reads an anonymous version declaration.
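// e.g. "{ global: foo; bar; local: *; };" with no preceding version name.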
void ScriptParser::readAnonymousDeclaration() {
std::vector<SymbolVersion> Locals;
std::vector<SymbolVersion> Globals;
std::tie(Locals, Globals) = readSymbols();
for (SymbolVersion V : Locals) {
if (V.Name == "*")
Config->DefaultSymbolVersion = VER_NDX_LOCAL;
else
Config->VersionScriptLocals.push_back(V);
}
for (SymbolVersion V : Globals)
Config->VersionScriptGlobals.push_back(V);
expect(";");
}
// Reads a non-anonymous version definition,
// e.g. "VerStr { global: foo; bar; local: *; };".
void ScriptParser::readVersionDeclaration(StringRef VerStr) {
// Read a symbol list.
std::vector<SymbolVersion> Locals;
std::vector<SymbolVersion> Globals;
std::tie(Locals, Globals) = readSymbols();
for (SymbolVersion V : Locals) {
if (V.Name == "*")
Config->DefaultSymbolVersion = VER_NDX_LOCAL;
else
Config->VersionScriptLocals.push_back(V);
}
// Create a new version definition and add that to the global symbols.
VersionDefinition Ver;
Ver.Name = VerStr;
Ver.Globals = Globals;
// User-defined version numbers start from 2 because 0 and 1 are
// reserved for VER_NDX_LOCAL and VER_NDX_GLOBAL, respectively.
Ver.Id = Config->VersionDefinitions.size() + 2;
Config->VersionDefinitions.push_back(Ver);
// Each version may have a parent version. For example, "Ver2"
// defined as "Ver2 { global: foo; local: *; } Ver1;" has "Ver1"
// as a parent. This version hierarchy is, perhaps counterintuitively,
// purely a hint; the runtime doesn't care about it
// at all. In LLD, we simply ignore it.
if (peek() != ";")
skip();
expect(";");
}
static bool hasWildcard(StringRef S) {
return S.find_first_of("?*[") != StringRef::npos;
}
// Reads a list of symbols, e.g. "{ global: foo; bar; local: *; };".
std::pair<std::vector<SymbolVersion>, std::vector<SymbolVersion>>
ScriptParser::readSymbols() {
std::vector<SymbolVersion> Locals;
std::vector<SymbolVersion> Globals;
std::vector<SymbolVersion> *V = &Globals;
while (!errorCount()) {
if (consume("}"))
break;
if (consumeLabel("local")) {
V = &Locals;
continue;
}
if (consumeLabel("global")) {
V = &Globals;
continue;
}
if (consume("extern")) {
std::vector<SymbolVersion> Ext = readVersionExtern();
V->insert(V->end(), Ext.begin(), Ext.end());
} else {
StringRef Tok = next();
V->push_back({unquote(Tok), false, hasWildcard(Tok)});
}
expect(";");
}
return {Locals, Globals};
}
// Reads an "extern C++" directive, e.g.,
// "extern "C++" { ns::*; "f(int, double)"; };"
//
// The last semicolon is optional. E.g. this is OK:
// "extern "C++" { ns::*; "f(int, double)" };"
std::vector<SymbolVersion> ScriptParser::readVersionExtern() {
StringRef Tok = next();
bool IsCXX = Tok == "\"C++\"";
if (!IsCXX && Tok != "\"C\"")
setError("Unknown language");
expect("{");
std::vector<SymbolVersion> Ret;
while (!errorCount() && peek() != "}") {
StringRef Tok = next();
bool HasWildcard = !Tok.startswith("\"") && hasWildcard(Tok);
Ret.push_back({unquote(Tok), IsCXX, HasWildcard});
if (consume("}"))
return Ret;
expect(";");
}
expect("}");
return Ret;
}
uint64_t ScriptParser::readMemoryAssignment(StringRef S1, StringRef S2,
StringRef S3) {
if (!consume(S1) && !consume(S2) && !consume(S3)) {
setError("expected one of: " + S1 + ", " + S2 + ", or " + S3);
return 0;
}
expect("=");
return readExpr()().getValue();
}
// Parse the MEMORY command as specified in:
// https://sourceware.org/binutils/docs/ld/MEMORY.html
//
// MEMORY { name [(attr)] : ORIGIN = origin, LENGTH = len ... }
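//
// For example (hypothetical origins and lengths):
//
//   MEMORY {
//     rom (rx)  : ORIGIN = 0, LENGTH = 256K
//     ram (!rx) : ORIGIN = 0x40000000, LENGTH = 4M
//   }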
void ScriptParser::readMemory() {
expect("{");
while (!errorCount() && !consume("}")) {
- StringRef Name = next();
+ StringRef Tok = next();
+ if (Tok == "INCLUDE") {
+ readInclude();
+ continue;
+ }
uint32_t Flags = 0;
uint32_t NegFlags = 0;
if (consume("(")) {
std::tie(Flags, NegFlags) = readMemoryAttributes();
expect(")");
}
expect(":");
uint64_t Origin = readMemoryAssignment("ORIGIN", "org", "o");
expect(",");
uint64_t Length = readMemoryAssignment("LENGTH", "len", "l");
// Add the memory region to the region map.
- MemoryRegion *MR =
- make<MemoryRegion>(Name, Origin, Length, Flags, NegFlags);
- if (!Script->MemoryRegions.insert({Name, MR}).second)
- setError("region '" + Name + "' already defined");
+ MemoryRegion *MR = make<MemoryRegion>(Tok, Origin, Length, Flags, NegFlags);
+ if (!Script->MemoryRegions.insert({Tok, MR}).second)
+ setError("region '" + Tok + "' already defined");
}
}
// This function parses the attributes used to match against section
// flags when placing output sections in a memory region. These flags
// are only used when an explicit memory region name is not used.
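//
// For example, "(w!x)" yields Flags = SHF_WRITE and NegFlags =
// SHF_EXECINSTR, i.e. the region matches writable, non-executable
// sections. Note that 'r' sets no flag, since readability has no
// corresponding section flag.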
std::pair<uint32_t, uint32_t> ScriptParser::readMemoryAttributes() {
uint32_t Flags = 0;
uint32_t NegFlags = 0;
bool Invert = false;
for (char C : next().lower()) {
uint32_t Flag = 0;
if (C == '!')
Invert = !Invert;
else if (C == 'w')
Flag = SHF_WRITE;
else if (C == 'x')
Flag = SHF_EXECINSTR;
else if (C == 'a')
Flag = SHF_ALLOC;
else if (C != 'r')
setError("invalid memory region attribute");
if (Invert)
NegFlags |= Flag;
else
Flags |= Flag;
}
return {Flags, NegFlags};
}
void elf::readLinkerScript(MemoryBufferRef MB) {
ScriptParser(MB).readLinkerScript();
}
void elf::readVersionScript(MemoryBufferRef MB) {
ScriptParser(MB).readVersionScript();
}
void elf::readDynamicList(MemoryBufferRef MB) {
ScriptParser(MB).readDynamicList();
}
void elf::readDefsym(StringRef Name, MemoryBufferRef MB) {
ScriptParser(MB).readDefsym(Name);
}
Index: projects/clang700-import/contrib/llvm/tools/lld/ELF/Symbols.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/lld/ELF/Symbols.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/lld/ELF/Symbols.cpp (revision 340125)
@@ -1,273 +1,273 @@
//===- Symbols.cpp --------------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "Symbols.h"
#include "InputFiles.h"
#include "InputSection.h"
#include "OutputSections.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "Writer.h"
#include "lld/Common/ErrorHandler.h"
#include "lld/Common/Strings.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Path.h"
#include <cstring>
using namespace llvm;
using namespace llvm::object;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
Defined *ElfSym::Bss;
Defined *ElfSym::Etext1;
Defined *ElfSym::Etext2;
Defined *ElfSym::Edata1;
Defined *ElfSym::Edata2;
Defined *ElfSym::End1;
Defined *ElfSym::End2;
Defined *ElfSym::GlobalOffsetTable;
Defined *ElfSym::MipsGp;
Defined *ElfSym::MipsGpDisp;
Defined *ElfSym::MipsLocalGp;
Defined *ElfSym::RelaIpltEnd;
static uint64_t getSymVA(const Symbol &Sym, int64_t &Addend) {
switch (Sym.kind()) {
case Symbol::DefinedKind: {
auto &D = cast<Defined>(Sym);
SectionBase *IS = D.Section;
// According to the ELF spec, references to a local symbol from outside
// the group are not allowed. Unfortunately .eh_frame breaks that rule
// and must be treated specially. For now we just replace the symbol with 0.
if (IS == &InputSection::Discarded)
return 0;
// This is an absolute symbol.
if (!IS)
return D.Value;
IS = IS->Repl;
uint64_t Offset = D.Value;
// An object in an SHF_MERGE section might be referenced via a
// section symbol (as a hack for reducing the number of local
// symbols).
// Depending on the addend, the reference via a section symbol
// refers to a different object in the merge section.
// Since the objects in the merge section are not necessarily
// contiguous in the output, the addend can thus affect the final
// VA in a non-linear way.
// To make this work, we incorporate the addend into the section
// offset (and zero out the addend for later processing) so that
// we find the right object in the section.
if (D.isSection()) {
Offset += Addend;
Addend = 0;
}
// In the typical case, this is actually very simple and boils
// down to adding together 3 numbers:
// 1. The address of the output section.
// 2. The offset of the input section within the output section.
// 3. The offset within the input section (this addition happens
// inside InputSection::getOffset).
//
// If you understand the data structures involved with this next
// line (and how they get built), then you have a pretty good
// understanding of the linker.
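//
// For example (made-up numbers): with an output section at 0x400000, an
// input section placed 0x100 bytes into it, and an offset of 0x10 within
// the input section, the resulting VA is 0x400000 + 0x100 + 0x10 =
// 0x400110.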
uint64_t VA = IS->getVA(Offset);
if (D.isTls() && !Config->Relocatable) {
if (!Out::TlsPhdr)
fatal(toString(D.File) +
" has an STT_TLS symbol but doesn't have an SHF_TLS section");
return VA - Out::TlsPhdr->p_vaddr;
}
return VA;
}
case Symbol::SharedKind:
case Symbol::UndefinedKind:
return 0;
case Symbol::LazyArchiveKind:
case Symbol::LazyObjectKind:
llvm_unreachable("lazy symbol reached writer");
}
llvm_unreachable("invalid symbol kind");
}
uint64_t Symbol::getVA(int64_t Addend) const {
uint64_t OutVA = getSymVA(*this, Addend);
return OutVA + Addend;
}
uint64_t Symbol::getGotVA() const { return InX::Got->getVA() + getGotOffset(); }
uint64_t Symbol::getGotOffset() const {
return GotIndex * Target->GotEntrySize;
}
uint64_t Symbol::getGotPltVA() const {
if (this->IsInIgot)
return InX::IgotPlt->getVA() + getGotPltOffset();
return InX::GotPlt->getVA() + getGotPltOffset();
}
uint64_t Symbol::getGotPltOffset() const {
if (IsInIgot)
return PltIndex * Target->GotPltEntrySize;
return (PltIndex + Target->GotPltHeaderEntriesNum) * Target->GotPltEntrySize;
}
uint64_t Symbol::getPltVA() const {
if (this->IsInIplt)
return InX::Iplt->getVA() + PltIndex * Target->PltEntrySize;
return InX::Plt->getVA() + Target->getPltEntryOffset(PltIndex);
}
uint64_t Symbol::getPltOffset() const {
assert(!this->IsInIplt);
return Target->getPltEntryOffset(PltIndex);
}
uint64_t Symbol::getSize() const {
if (const auto *DR = dyn_cast<Defined>(this))
return DR->Size;
return cast<SharedSymbol>(this)->Size;
}
OutputSection *Symbol::getOutputSection() const {
if (auto *S = dyn_cast<Defined>(this)) {
if (auto *Sec = S->Section)
return Sec->Repl->getOutputSection();
return nullptr;
}
return nullptr;
}
// If a symbol name contains '@', the characters after it form
// a symbol version name. This function parses that.
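// For example, "foo@VER1" binds foo to version VER1, while "foo@@VER1"
// additionally marks VER1 as the default version of foo.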
void Symbol::parseSymbolVersion() {
StringRef S = getName();
size_t Pos = S.find('@');
if (Pos == 0 || Pos == StringRef::npos)
return;
StringRef Verstr = S.substr(Pos + 1);
if (Verstr.empty())
return;
// Truncate the symbol name so that it doesn't include the version string.
NameSize = Pos;
// If this is not in this DSO, it is not a definition.
if (!isDefined())
return;
// '@@' in a symbol name means the default version.
// It is usually the most recent one.
bool IsDefault = (Verstr[0] == '@');
if (IsDefault)
Verstr = Verstr.substr(1);
for (VersionDefinition &Ver : Config->VersionDefinitions) {
if (Ver.Name != Verstr)
continue;
if (IsDefault)
VersionId = Ver.Id;
else
VersionId = Ver.Id | VERSYM_HIDDEN;
return;
}
// It is an error if the specified version is not defined.
// Usually version script is not provided when linking executable,
// but we may still want to override a versioned symbol from DSO,
// so we do not report error in this case. We also do not error
// if the symbol has a local version as it won't be in the dynamic
// symbol table.
if (Config->Shared && VersionId != VER_NDX_LOCAL)
error(toString(File) + ": symbol " + S + " has undefined version " +
Verstr);
}
InputFile *LazyArchive::fetch() { return cast<ArchiveFile>(File)->fetch(Sym); }
uint8_t Symbol::computeBinding() const {
if (Config->Relocatable)
return Binding;
if (Visibility != STV_DEFAULT && Visibility != STV_PROTECTED)
return STB_LOCAL;
- if (VersionId == VER_NDX_LOCAL && isDefined())
+ if (VersionId == VER_NDX_LOCAL && isDefined() && !IsPreemptible)
return STB_LOCAL;
if (!Config->GnuUnique && Binding == STB_GNU_UNIQUE)
return STB_GLOBAL;
return Binding;
}
bool Symbol::includeInDynsym() const {
if (!Config->HasDynSymTab)
return false;
if (computeBinding() == STB_LOCAL)
return false;
if (!isDefined())
return true;
return ExportDynamic;
}
// Print out a log message for --trace-symbol.
void elf::printTraceSymbol(Symbol *Sym) {
std::string S;
if (Sym->isUndefined())
S = ": reference to ";
else if (Sym->isLazy())
S = ": lazy definition of ";
else if (Sym->isShared())
S = ": shared definition of ";
else if (dyn_cast_or_null<BssSection>(cast<Defined>(Sym)->Section))
S = ": common definition of ";
else
S = ": definition of ";
message(toString(Sym->File) + S + Sym->getName());
}
void elf::warnUnorderableSymbol(const Symbol *Sym) {
if (!Config->WarnSymbolOrdering)
return;
const InputFile *File = Sym->File;
auto *D = dyn_cast<Defined>(Sym);
auto Warn = [&](StringRef S) { warn(toString(File) + S + Sym->getName()); };
if (Sym->isUndefined())
Warn(": unable to order undefined symbol: ");
else if (Sym->isShared())
Warn(": unable to order shared symbol: ");
else if (D && !D->Section)
Warn(": unable to order absolute symbol: ");
else if (D && isa<OutputSection>(D->Section))
Warn(": unable to order synthetic symbol: ");
else if (D && !D->Section->Repl->Live)
Warn(": unable to order discarded symbol: ");
}
// Returns a symbol for an error message.
std::string lld::toString(const Symbol &B) {
if (Config->Demangle)
if (Optional<std::string> S = demangleItanium(B.getName()))
return *S;
return B.getName();
}
Index: projects/clang700-import/contrib/llvm/tools/lld
===================================================================
--- projects/clang700-import/contrib/llvm/tools/lld (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/lld (revision 340125)
Property changes on: projects/clang700-import/contrib/llvm/tools/lld
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /vendor/lld/dist-release_70:r338729-340124
Index: projects/clang700-import/contrib/llvm/tools/lldb
===================================================================
--- projects/clang700-import/contrib/llvm/tools/lldb (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/lldb (revision 340125)
Property changes on: projects/clang700-import/contrib/llvm/tools/lldb
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /vendor/lldb/dist-release_70:r338729-340124
Index: projects/clang700-import/contrib/llvm/tools/llvm-xray/xray-account.cpp
===================================================================
--- projects/clang700-import/contrib/llvm/tools/llvm-xray/xray-account.cpp (revision 340124)
+++ projects/clang700-import/contrib/llvm/tools/llvm-xray/xray-account.cpp (revision 340125)
@@ -1,510 +1,513 @@
//===- xray-account.h - XRay Function Call Accounting ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements basic function call accounting from an XRay trace.
//
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <cassert>
#include <numeric>
#include <system_error>
#include <utility>
#include "xray-account.h"
#include "xray-registry.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/XRay/InstrumentationMap.h"
#include "llvm/XRay/Trace.h"
using namespace llvm;
using namespace llvm::xray;
static cl::SubCommand Account("account", "Function call accounting");
static cl::opt<std::string> AccountInput(cl::Positional,
cl::desc("<xray log file>"),
cl::Required, cl::sub(Account));
static cl::opt<bool>
AccountKeepGoing("keep-going", cl::desc("Keep going on errors encountered"),
cl::sub(Account), cl::init(false));
static cl::alias AccountKeepGoing2("k", cl::aliasopt(AccountKeepGoing),
cl::desc("Alias for -keep_going"),
cl::sub(Account));
static cl::opt<bool> AccountDeduceSiblingCalls(
"deduce-sibling-calls",
cl::desc("Deduce sibling calls when unrolling function call stacks"),
cl::sub(Account), cl::init(false));
static cl::alias
AccountDeduceSiblingCalls2("d", cl::aliasopt(AccountDeduceSiblingCalls),
cl::desc("Alias for -deduce_sibling_calls"),
cl::sub(Account));
static cl::opt<std::string>
AccountOutput("output", cl::value_desc("output file"), cl::init("-"),
cl::desc("output file; use '-' for stdout"),
cl::sub(Account));
static cl::alias AccountOutput2("o", cl::aliasopt(AccountOutput),
cl::desc("Alias for -output"),
cl::sub(Account));
enum class AccountOutputFormats { TEXT, CSV };
static cl::opt<AccountOutputFormats>
AccountOutputFormat("format", cl::desc("output format"),
cl::values(clEnumValN(AccountOutputFormats::TEXT,
"text", "report stats in text"),
clEnumValN(AccountOutputFormats::CSV, "csv",
"report stats in csv")),
cl::sub(Account));
static cl::alias AccountOutputFormat2("f", cl::desc("Alias of -format"),
cl::aliasopt(AccountOutputFormat),
cl::sub(Account));
enum class SortField {
FUNCID,
COUNT,
MIN,
MED,
PCT90,
PCT99,
MAX,
SUM,
FUNC,
};
static cl::opt<SortField> AccountSortOutput(
"sort", cl::desc("sort output by this field"), cl::value_desc("field"),
cl::sub(Account), cl::init(SortField::FUNCID),
cl::values(clEnumValN(SortField::FUNCID, "funcid", "function id"),
clEnumValN(SortField::COUNT, "count", "function call counts"),
clEnumValN(SortField::MIN, "min", "minimum function durations"),
clEnumValN(SortField::MED, "med", "median function durations"),
clEnumValN(SortField::PCT90, "90p", "90th percentile durations"),
clEnumValN(SortField::PCT99, "99p", "99th percentile durations"),
clEnumValN(SortField::MAX, "max", "maximum function durations"),
clEnumValN(SortField::SUM, "sum", "sum of call durations"),
clEnumValN(SortField::FUNC, "func", "function names")));
static cl::alias AccountSortOutput2("s", cl::aliasopt(AccountSortOutput),
cl::desc("Alias for -sort"),
cl::sub(Account));
enum class SortDirection {
ASCENDING,
DESCENDING,
};
static cl::opt<SortDirection> AccountSortOrder(
"sortorder", cl::desc("sort ordering"), cl::init(SortDirection::ASCENDING),
cl::values(clEnumValN(SortDirection::ASCENDING, "asc", "ascending"),
clEnumValN(SortDirection::DESCENDING, "dsc", "descending")),
cl::sub(Account));
static cl::alias AccountSortOrder2("r", cl::aliasopt(AccountSortOrder),
cl::desc("Alias for -sortorder"),
cl::sub(Account));
static cl::opt<int> AccountTop("top", cl::desc("only show the top N results"),
cl::value_desc("N"), cl::sub(Account),
cl::init(-1));
static cl::alias AccountTop2("p", cl::desc("Alias for -top"),
cl::aliasopt(AccountTop), cl::sub(Account));
static cl::opt<std::string>
AccountInstrMap("instr_map",
cl::desc("binary with the instrumentation map, or "
"a separate instrumentation map"),
cl::value_desc("binary with xray_instr_map"),
cl::sub(Account), cl::init(""));
static cl::alias AccountInstrMap2("m", cl::aliasopt(AccountInstrMap),
cl::desc("Alias for -instr_map"),
cl::sub(Account));
namespace {
template <class T, class U> void setMinMax(std::pair<T, T> &MM, U &&V) {
if (MM.first == 0 || MM.second == 0)
MM = std::make_pair(std::forward<U>(V), std::forward<U>(V));
else
MM = std::make_pair(std::min(MM.first, V), std::max(MM.second, V));
}
template <class T> T diff(T L, T R) { return std::max(L, R) - std::min(L, R); }
} // namespace
bool LatencyAccountant::accountRecord(const XRayRecord &Record) {
setMinMax(PerThreadMinMaxTSC[Record.TId], Record.TSC);
setMinMax(PerCPUMinMaxTSC[Record.CPU], Record.TSC);
if (CurrentMaxTSC == 0)
CurrentMaxTSC = Record.TSC;
if (Record.TSC < CurrentMaxTSC)
return false;
auto &ThreadStack = PerThreadFunctionStack[Record.TId];
switch (Record.Type) {
case RecordTypes::ENTER:
case RecordTypes::ENTER_ARG: {
ThreadStack.emplace_back(Record.FuncId, Record.TSC);
break;
}
case RecordTypes::EXIT:
case RecordTypes::TAIL_EXIT: {
if (ThreadStack.empty())
return false;
if (ThreadStack.back().first == Record.FuncId) {
const auto &Top = ThreadStack.back();
recordLatency(Top.first, diff(Top.second, Record.TSC));
ThreadStack.pop_back();
break;
}
if (!DeduceSiblingCalls)
return false;
// Look for the parent up the stack.
auto Parent =
std::find_if(ThreadStack.rbegin(), ThreadStack.rend(),
[&](const std::pair<const int32_t, uint64_t> &E) {
return E.first == Record.FuncId;
});
if (Parent == ThreadStack.rend())
return false;
// Account time for this apparent sibling-call exit up the stack.
// Consider the following case:
//
// f()
// g()
// h()
//
// We might only ever see the following entries:
//
// -> f()
// -> g()
// -> h()
// <- h()
// <- f()
//
// Now we don't see the exit to g() because some older version of the XRay
// runtime wasn't instrumenting tail exits. If we don't deduce tail calls,
// we may potentially never account time for g() -- and this code would have
// already bailed out, because `<- f()` doesn't match the current "top" of
// stack where we're waiting for the exit to `g()` instead. This is not
// ideal and is brittle -- so we instead provide a potentially inaccurate
// accounting of g(), computing it from the exit of f().
//
// While it might be better that we account the time between `-> g()` and
// `-> h()` as the proper accounting of time for g() here, this introduces
// complexity to do correctly (need to backtrack, etc.).
//
// FIXME: Potentially implement the more complex deduction algorithm?
auto I = std::next(Parent).base();
for (auto &E : make_range(I, ThreadStack.end())) {
recordLatency(E.first, diff(E.second, Record.TSC));
}
ThreadStack.erase(I, ThreadStack.end());
break;
}
}
return true;
}
namespace {
// We consolidate the data into a struct which we can output in various forms.
struct ResultRow {
uint64_t Count;
double Min;
double Median;
double Pct90;
double Pct99;
double Max;
double Sum;
std::string DebugInfo;
std::string Function;
};
ResultRow getStats(std::vector<uint64_t> &Timings) {
assert(!Timings.empty());
ResultRow R;
R.Sum = std::accumulate(Timings.begin(), Timings.end(), 0.0);
auto MinMax = std::minmax_element(Timings.begin(), Timings.end());
R.Min = *MinMax.first;
R.Max = *MinMax.second;
R.Count = Timings.size();
auto MedianOff = Timings.size() / 2;
std::nth_element(Timings.begin(), Timings.begin() + MedianOff, Timings.end());
R.Median = Timings[MedianOff];
auto Pct90Off = std::floor(Timings.size() * 0.9);
std::nth_element(Timings.begin(), Timings.begin() + Pct90Off, Timings.end());
R.Pct90 = Timings[Pct90Off];
auto Pct99Off = std::floor(Timings.size() * 0.99);
std::nth_element(Timings.begin(), Timings.begin() + Pct99Off, Timings.end());
R.Pct99 = Timings[Pct99Off];
return R;
}
} // namespace
template <class F>
void LatencyAccountant::exportStats(const XRayFileHeader &Header, F Fn) const {
using TupleType = std::tuple<int32_t, uint64_t, ResultRow>;
std::vector<TupleType> Results;
Results.reserve(FunctionLatencies.size());
for (auto FT : FunctionLatencies) {
const auto &FuncId = FT.first;
auto &Timings = FT.second;
Results.emplace_back(FuncId, Timings.size(), getStats(Timings));
auto &Row = std::get<2>(Results.back());
if (Header.CycleFrequency) {
double CycleFrequency = Header.CycleFrequency;
Row.Min /= CycleFrequency;
Row.Median /= CycleFrequency;
Row.Pct90 /= CycleFrequency;
Row.Pct99 /= CycleFrequency;
Row.Max /= CycleFrequency;
Row.Sum /= CycleFrequency;
}
Row.Function = FuncIdHelper.SymbolOrNumber(FuncId);
Row.DebugInfo = FuncIdHelper.FileLineAndColumn(FuncId);
}
// Sort the data according to user-provided flags.
switch (AccountSortOutput) {
case SortField::FUNCID:
llvm::sort(Results.begin(), Results.end(),
[](const TupleType &L, const TupleType &R) {
if (AccountSortOrder == SortDirection::ASCENDING)
return std::get<0>(L) < std::get<0>(R);
if (AccountSortOrder == SortDirection::DESCENDING)
return std::get<0>(L) > std::get<0>(R);
llvm_unreachable("Unknown sort direction");
});
break;
case SortField::COUNT:
llvm::sort(Results.begin(), Results.end(),
[](const TupleType &L, const TupleType &R) {
if (AccountSortOrder == SortDirection::ASCENDING)
return std::get<1>(L) < std::get<1>(R);
if (AccountSortOrder == SortDirection::DESCENDING)
return std::get<1>(L) > std::get<1>(R);
llvm_unreachable("Unknown sort direction");
});
break;
default:
// Here we need to look into the ResultRow for the rest of the data that
// we want to sort by.
llvm::sort(Results.begin(), Results.end(),
[&](const TupleType &L, const TupleType &R) {
auto &LR = std::get<2>(L);
auto &RR = std::get<2>(R);
switch (AccountSortOutput) {
case SortField::COUNT:
if (AccountSortOrder == SortDirection::ASCENDING)
return LR.Count < RR.Count;
if (AccountSortOrder == SortDirection::DESCENDING)
return LR.Count > RR.Count;
llvm_unreachable("Unknown sort direction");
case SortField::MIN:
if (AccountSortOrder == SortDirection::ASCENDING)
return LR.Min < RR.Min;
if (AccountSortOrder == SortDirection::DESCENDING)
return LR.Min > RR.Min;
llvm_unreachable("Unknown sort direction");
case SortField::MED:
if (AccountSortOrder == SortDirection::ASCENDING)
return LR.Median < RR.Median;
if (AccountSortOrder == SortDirection::DESCENDING)
return LR.Median > RR.Median;
llvm_unreachable("Unknown sort direction");
case SortField::PCT90:
if (AccountSortOrder == SortDirection::ASCENDING)
return LR.Pct90 < RR.Pct90;
if (AccountSortOrder == SortDirection::DESCENDING)
return LR.Pct90 > RR.Pct90;
llvm_unreachable("Unknown sort direction");
case SortField::PCT99:
if (AccountSortOrder == SortDirection::ASCENDING)
return LR.Pct99 < RR.Pct99;
if (AccountSortOrder == SortDirection::DESCENDING)
return LR.Pct99 > RR.Pct99;
llvm_unreachable("Unknown sort direction");
case SortField::MAX:
if (AccountSortOrder == SortDirection::ASCENDING)
return LR.Max < RR.Max;
if (AccountSortOrder == SortDirection::DESCENDING)
return LR.Max > RR.Max;
llvm_unreachable("Unknown sort direction");
case SortField::SUM:
if (AccountSortOrder == SortDirection::ASCENDING)
return LR.Sum < RR.Sum;
if (AccountSortOrder == SortDirection::DESCENDING)
return LR.Sum > RR.Sum;
llvm_unreachable("Unknown sort direction");
default:
llvm_unreachable("Unsupported sort order");
}
});
break;
}
- if (AccountTop > 0)
- Results.erase(Results.begin() + AccountTop.getValue(), Results.end());
+ if (AccountTop > 0) {
+ auto MaxTop =
+ std::min(AccountTop.getValue(), static_cast<int>(Results.size()));
+ Results.erase(Results.begin() + MaxTop, Results.end());
+ }
for (const auto &R : Results)
Fn(std::get<0>(R), std::get<1>(R), std::get<2>(R));
}
void LatencyAccountant::exportStatsAsText(raw_ostream &OS,
const XRayFileHeader &Header) const {
OS << "Functions with latencies: " << FunctionLatencies.size() << "\n";
// We spend some effort to make the text output more readable, so we make the
// following formatting decisions for each of the fields:
//
// - funcid: 32-bit, but we can determine the largest number and use
//   between a minimum of 5 characters and up to 9 characters, right aligned.
// - count: 64-bit, but we can determine the largest number and use
//   between a minimum of 5 characters and up to 9 characters, right aligned.
// - min, median, 90pct, 99pct, max: double precision, but we want to keep
//   the values in seconds, with microsecond precision (0.000'001), so we
//   have at most 6 significant digits, with the whole number part being at
//   least 1 character. For readability we right-align, with a full 9
//   characters each.
// - debug info, function name: we format this as a concatenation of the
//   debug info and the function name.
//
static constexpr char StatsHeaderFormat[] =
"{0,+9} {1,+10} [{2,+9}, {3,+9}, {4,+9}, {5,+9}, {6,+9}] {7,+9}";
static constexpr char StatsFormat[] =
R"({0,+9} {1,+10} [{2,+9:f6}, {3,+9:f6}, {4,+9:f6}, {5,+9:f6}, {6,+9:f6}] {7,+9:f6})";
OS << llvm::formatv(StatsHeaderFormat, "funcid", "count", "min", "med", "90p",
"99p", "max", "sum")
<< llvm::formatv(" {0,-12}\n", "function");
exportStats(Header, [&](int32_t FuncId, size_t Count, const ResultRow &Row) {
OS << llvm::formatv(StatsFormat, FuncId, Count, Row.Min, Row.Median,
Row.Pct90, Row.Pct99, Row.Max, Row.Sum)
<< " " << Row.DebugInfo << ": " << Row.Function << "\n";
});
}
void LatencyAccountant::exportStatsAsCSV(raw_ostream &OS,
const XRayFileHeader &Header) const {
OS << "funcid,count,min,median,90%ile,99%ile,max,sum,debug,function\n";
exportStats(Header, [&](int32_t FuncId, size_t Count, const ResultRow &Row) {
OS << FuncId << ',' << Count << ',' << Row.Min << ',' << Row.Median << ','
<< Row.Pct90 << ',' << Row.Pct99 << ',' << Row.Max << "," << Row.Sum
<< ",\"" << Row.DebugInfo << "\",\"" << Row.Function << "\"\n";
});
}
using namespace llvm::xray;
namespace llvm {
template <> struct format_provider<llvm::xray::RecordTypes> {
static void format(const llvm::xray::RecordTypes &T, raw_ostream &Stream,
StringRef Style) {
switch(T) {
case RecordTypes::ENTER:
Stream << "enter";
break;
case RecordTypes::ENTER_ARG:
Stream << "enter-arg";
break;
case RecordTypes::EXIT:
Stream << "exit";
break;
case RecordTypes::TAIL_EXIT:
Stream << "tail-exit";
break;
}
}
};
} // namespace llvm
static CommandRegistration Unused(&Account, []() -> Error {
InstrumentationMap Map;
if (!AccountInstrMap.empty()) {
auto InstrumentationMapOrError = loadInstrumentationMap(AccountInstrMap);
if (!InstrumentationMapOrError)
return joinErrors(make_error<StringError>(
Twine("Cannot open instrumentation map '") +
AccountInstrMap + "'",
std::make_error_code(std::errc::invalid_argument)),
InstrumentationMapOrError.takeError());
Map = std::move(*InstrumentationMapOrError);
}
std::error_code EC;
raw_fd_ostream OS(AccountOutput, EC, sys::fs::OpenFlags::F_Text);
if (EC)
return make_error<StringError>(
Twine("Cannot open file '") + AccountOutput + "' for writing.", EC);
const auto &FunctionAddresses = Map.getFunctionAddresses();
symbolize::LLVMSymbolizer::Options Opts(
symbolize::FunctionNameKind::LinkageName, true, true, false, "");
symbolize::LLVMSymbolizer Symbolizer(Opts);
llvm::xray::FuncIdConversionHelper FuncIdHelper(AccountInstrMap, Symbolizer,
FunctionAddresses);
xray::LatencyAccountant FCA(FuncIdHelper, AccountDeduceSiblingCalls);
auto TraceOrErr = loadTraceFile(AccountInput);
if (!TraceOrErr)
return joinErrors(
make_error<StringError>(
Twine("Failed loading input file '") + AccountInput + "'",
std::make_error_code(std::errc::executable_format_error)),
TraceOrErr.takeError());
auto &T = *TraceOrErr;
for (const auto &Record : T) {
if (FCA.accountRecord(Record))
continue;
errs()
<< "Error processing record: "
<< llvm::formatv(
R"({{type: {0}; cpu: {1}; record-type: {2}; function-id: {3}; tsc: {4}; thread-id: {5}; process-id: {6}}})",
Record.RecordType, Record.CPU, Record.Type, Record.FuncId,
Record.TSC, Record.TId, Record.PId)
<< '\n';
for (const auto &ThreadStack : FCA.getPerThreadFunctionStack()) {
errs() << "Thread ID: " << ThreadStack.first << "\n";
if (ThreadStack.second.empty()) {
errs() << " (empty stack)\n";
continue;
}
auto Level = ThreadStack.second.size();
for (const auto &Entry : llvm::reverse(ThreadStack.second))
errs() << " #" << Level-- << "\t"
<< FuncIdHelper.SymbolOrNumber(Entry.first) << '\n';
}
if (!AccountKeepGoing)
return make_error<StringError>(
Twine("Failed accounting function calls in file '") + AccountInput +
"'.",
std::make_error_code(std::errc::executable_format_error));
}
switch (AccountOutputFormat) {
case AccountOutputFormats::TEXT:
FCA.exportStatsAsText(OS, T.getFileHeader());
break;
case AccountOutputFormats::CSV:
FCA.exportStatsAsCSV(OS, T.getFileHeader());
break;
}
return Error::success();
});
Index: projects/clang700-import/contrib/llvm
===================================================================
--- projects/clang700-import/contrib/llvm (revision 340124)
+++ projects/clang700-import/contrib/llvm (revision 340125)
Property changes on: projects/clang700-import/contrib/llvm
___________________________________________________________________
Modified: svn:mergeinfo
## -0,0 +0,1 ##
Merged /vendor/llvm/dist-release_70:r338727-340124
Index: projects/clang700-import/etc/mtree/BSD.debug.dist
===================================================================
--- projects/clang700-import/etc/mtree/BSD.debug.dist (revision 340124)
+++ projects/clang700-import/etc/mtree/BSD.debug.dist (revision 340125)
@@ -1,62 +1,62 @@
# $FreeBSD$
#
# Please see the file src/etc/mtree/README before making changes to this file.
#
/set type=dir uname=root gname=wheel mode=0755
.
debug
bin
..
boot
kernel
..
modules
..
..
lib
casper
..
geom
..
..
libexec
..
sbin
..
usr
bin
..
lib
clang
- 7.0.0
+ 7.0.1
lib
freebsd
..
..
..
..
engines
..
i18n
..
..
libexec
bsdinstall
..
lpr
ru
..
..
sendmail
..
sm.bin
..
..
sbin
..
tests
..
..
..
..
Index: projects/clang700-import/etc/mtree/BSD.usr.dist
===================================================================
--- projects/clang700-import/etc/mtree/BSD.usr.dist (revision 340124)
+++ projects/clang700-import/etc/mtree/BSD.usr.dist (revision 340125)
@@ -1,1242 +1,1242 @@
# $FreeBSD$
#
# Please see the file src/etc/mtree/README before making changes to this file.
#
/set type=dir uname=root gname=wheel mode=0755
.
bin
..
include
private
bsdstat
..
event
..
sqlite3
..
ucl
..
zstd
..
..
..
lib
aout
..
clang
- 7.0.0
+ 7.0.1
include
sanitizer
..
..
lib
freebsd
..
..
..
..
compat
aout
..
..
dtrace
..
engines
..
i18n
..
libxo
encoder
..
..
..
libdata
gcc
..
ldscripts
..
pkgconfig
..
..
libexec
bsdconfig
020.docsinstall
include
..
..
030.packages
include
..
..
040.password
include
..
..
050.diskmgmt
include
..
..
070.usermgmt
include
..
..
080.console
include
..
..
090.timezone
include
..
..
110.mouse
include
..
..
120.networking
include
..
..
130.security
include
..
..
140.startup
include
..
..
150.ttys
include
..
..
dot
include
..
..
include
..
includes
include
..
..
..
bsdinstall
..
dwatch
..
hyperv
..
lpr
ru
..
..
sendmail
..
sm.bin
..
..
local
..
obj nochange
..
sbin
..
share
atf
..
bsdconfig
media
..
networking
..
packages
..
password
..
startup
..
timezone
..
usermgmt
..
..
calendar
de_AT.ISO_8859-15
..
de_DE.ISO8859-1
..
fr_FR.ISO8859-1
..
hr_HR.ISO8859-2
..
hu_HU.ISO8859-2
..
pt_BR.ISO8859-1
..
pt_BR.UTF-8
..
ru_RU.KOI8-R
..
ru_RU.UTF-8
..
uk_UA.KOI8-U
..
..
dict
..
doc
IPv6
..
atf
..
legal
..
llvm
clang
..
..
ncurses
..
ntp
drivers
icons
..
scripts
..
..
hints
..
icons
..
pic
..
scripts
..
..
pjdfstest
..
..
dtrace
..
examples
BSD_daemon
..
FreeBSD_version
..
IPv6
..
bhyve
..
bootforth
..
bsdconfig
..
csh
..
diskless
..
dma
..
drivers
..
dwatch
..
etc
defaults
..
..
find_interface
..
hast
..
hostapd
..
ibcs2
..
indent
..
ipfilter
..
ipfw
..
jails
..
kld
cdev
module
..
test
..
..
dyn_sysctl
..
firmware
fwconsumer
..
fwimage
..
..
khelp
..
syscall
module
..
test
..
..
..
libusb20
..
libvgl
..
mdoc
..
netgraph
..
pc-sysinstall
..
perfmon
..
pf
..
ppi
..
ppp
..
printing
..
scsi_target
..
ses
getencstat
..
sesd
..
setencstat
..
setobjstat
..
srcs
..
..
smbfs
print
..
..
sunrpc
dir
..
msg
..
sort
..
..
tcsh
..
uefisign
..
ypldap
..
..
firmware
..
games
fortune
..
..
i18n
csmapper
APPLE
..
AST
..
BIG5
..
CNS
..
CP
..
EBCDIC
..
GB
..
GEORGIAN
..
ISO-8859
..
ISO646
..
JIS
..
KAZAKH
..
KOI
..
KS
..
MISC
..
TCVN
..
..
esdb
APPLE
..
AST
..
BIG5
..
CP
..
DEC
..
EBCDIC
..
EUC
..
GB
..
GEORGIAN
..
ISO-2022
..
ISO-8859
..
ISO646
..
KAZAKH
..
KOI
..
MISC
..
TCVN
..
UTF
..
..
..
keys
pkg
revoked
..
trusted
..
..
..
locale
af_ZA.ISO8859-1
..
af_ZA.ISO8859-15
..
af_ZA.UTF-8
..
ar_AE.UTF-8
..
ar_EG.UTF-8
..
ar_JO.UTF-8
..
ar_MA.UTF-8
..
ar_QA.UTF-8
..
ar_SA.UTF-8
..
am_ET.UTF-8
..
be_BY.CP1131
..
be_BY.CP1251
..
be_BY.ISO8859-5
..
be_BY.UTF-8
..
bg_BG.CP1251
..
bg_BG.UTF-8
..
ca_AD.ISO8859-1
..
ca_AD.ISO8859-15
..
ca_ES.ISO8859-1
..
ca_ES.ISO8859-15
..
ca_FR.ISO8859-1
..
ca_FR.ISO8859-15
..
ca_IT.ISO8859-1
..
ca_IT.ISO8859-15
..
ca_AD.UTF-8
..
ca_ES.UTF-8
..
ca_FR.UTF-8
..
ca_IT.UTF-8
..
cs_CZ.ISO8859-2
..
cs_CZ.UTF-8
..
da_DK.ISO8859-1
..
da_DK.ISO8859-15
..
da_DK.UTF-8
..
de_AT.ISO8859-1
..
de_AT.ISO8859-15
..
de_AT.UTF-8
..
de_CH.ISO8859-1
..
de_CH.ISO8859-15
..
de_CH.UTF-8
..
de_DE.ISO8859-1
..
de_DE.ISO8859-15
..
de_DE.UTF-8
..
el_GR.ISO8859-7
..
el_GR.UTF-8
..
en_AU.ISO8859-1
..
en_AU.ISO8859-15
..
en_AU.US-ASCII
..
en_AU.UTF-8
..
en_CA.ISO8859-1
..
en_CA.ISO8859-15
..
en_CA.US-ASCII
..
en_CA.UTF-8
..
en_GB.ISO8859-1
..
en_GB.ISO8859-15
..
en_GB.US-ASCII
..
en_GB.UTF-8
..
en_HK.ISO8859-1
..
en_HK.UTF-8
..
en_IE.ISO8859-1
..
en_IE.ISO8859-15
..
en_IE.UTF-8
..
en_NZ.ISO8859-1
..
en_NZ.ISO8859-15
..
en_NZ.US-ASCII
..
en_NZ.UTF-8
..
en_PH.UTF-8
..
en_SG.ISO8859-1
..
en_SG.UTF-8
..
en_US.ISO8859-1
..
en_US.ISO8859-15
..
en_US.US-ASCII
..
en_US.UTF-8
..
en_ZA.ISO8859-1
..
en_ZA.ISO8859-15
..
en_ZA.US-ASCII
..
en_ZA.UTF-8
..
es_AR.ISO8859-1
..
es_AR.UTF-8
..
es_CR.UTF-8
..
es_ES.ISO8859-1
..
es_ES.ISO8859-15
..
es_ES.UTF-8
..
es_MX.ISO8859-1
..
es_MX.UTF-8
..
et_EE.ISO8859-1
..
et_EE.ISO8859-15
..
et_EE.UTF-8
..
eu_ES.ISO8859-1
..
eu_ES.ISO8859-15
..
eu_ES.UTF-8
..
fi_FI.ISO8859-1
..
fi_FI.ISO8859-15
..
fi_FI.UTF-8
..
fr_BE.ISO8859-1
..
fr_BE.ISO8859-15
..
fr_BE.UTF-8
..
fr_CA.ISO8859-1
..
fr_CA.ISO8859-15
..
fr_CA.UTF-8
..
fr_CH.ISO8859-1
..
fr_CH.ISO8859-15
..
fr_CH.UTF-8
..
fr_FR.ISO8859-1
..
fr_FR.ISO8859-15
..
fr_FR.UTF-8
..
he_IL.UTF-8
..
hi_IN.ISCII-DEV
..
hi_IN.UTF-8
..
hr_HR.ISO8859-2
..
hr_HR.UTF-8
..
hu_HU.ISO8859-2
..
hu_HU.UTF-8
..
hy_AM.ARMSCII-8
..
hy_AM.UTF-8
..
is_IS.ISO8859-1
..
is_IS.ISO8859-15
..
is_IS.UTF-8
..
it_CH.ISO8859-1
..
it_CH.ISO8859-15
..
it_CH.UTF-8
..
it_IT.ISO8859-1
..
it_IT.ISO8859-15
..
it_IT.UTF-8
..
ja_JP.SJIS
..
ja_JP.UTF-8
..
ja_JP.eucJP
..
kk_KZ.UTF-8
..
ko_KR.CP949
..
ko_KR.UTF-8
..
ko_KR.eucKR
..
lt_LT.ISO8859-13
..
lt_LT.UTF-8
..
lv_LV.ISO8859-13
..
lv_LV.UTF-8
..
mn_MN.UTF-8
..
nb_NO.ISO8859-1
..
nb_NO.ISO8859-15
..
nb_NO.UTF-8
..
nl_BE.ISO8859-1
..
nl_BE.ISO8859-15
..
nl_BE.UTF-8
..
nl_NL.ISO8859-1
..
nl_NL.ISO8859-15
..
nl_NL.UTF-8
..
nn_NO.ISO8859-1
..
nn_NO.ISO8859-15
..
nn_NO.UTF-8
..
pl_PL.ISO8859-2
..
pl_PL.UTF-8
..
pt_BR.ISO8859-1
..
pt_BR.UTF-8
..
pt_PT.ISO8859-1
..
pt_PT.ISO8859-15
..
pt_PT.UTF-8
..
ro_RO.ISO8859-2
..
ro_RO.UTF-8
..
ru_RU.CP1251
..
ru_RU.CP866
..
ru_RU.ISO8859-5
..
ru_RU.KOI8-R
..
ru_RU.UTF-8
..
se_FI.UTF-8
..
se_NO.UTF-8
..
sk_SK.ISO8859-2
..
sk_SK.UTF-8
..
sl_SI.ISO8859-2
..
sl_SI.UTF-8
..
sr_RS.ISO8859-5
..
sr_RS.UTF-8
..
sr_RS.ISO8859-2
..
sr_RS.UTF-8@latin
..
sv_FI.ISO8859-1
..
sv_FI.ISO8859-15
..
sv_FI.UTF-8
..
sv_SE.ISO8859-1
..
sv_SE.ISO8859-15
..
sv_SE.UTF-8
..
tr_TR.ISO8859-9
..
tr_TR.UTF-8
..
uk_UA.CP1251
..
uk_UA.ISO8859-5
..
uk_UA.KOI8-U
..
uk_UA.UTF-8
..
zh_CN.GB18030
..
zh_CN.GB2312
..
zh_CN.GBK
..
zh_CN.eucCN
..
zh_CN.UTF-8
..
zh_HK.UTF-8
..
zh_TW.Big5
..
zh_TW.UTF-8
..
..
man
man1
..
man2
..
man3
..
man4
aarch64
..
amd64
..
arm
..
i386
..
powerpc
..
sparc64
..
..
man5
..
man6
..
man7
..
man8
amd64
..
i386
..
powerpc
..
sparc64
..
..
man9
..
..
misc
fonts
..
..
mk
..
nls
C
..
af_ZA.ISO8859-1
..
af_ZA.ISO8859-15
..
af_ZA.UTF-8
..
am_ET.UTF-8
..
be_BY.CP1131
..
be_BY.CP1251
..
be_BY.ISO8859-5
..
be_BY.UTF-8
..
bg_BG.CP1251
..
bg_BG.UTF-8
..
ca_ES.ISO8859-1
..
ca_ES.ISO8859-15
..
ca_ES.UTF-8
..
cs_CZ.ISO8859-2
..
cs_CZ.UTF-8
..
da_DK.ISO8859-1
..
da_DK.ISO8859-15
..
da_DK.UTF-8
..
de_AT.ISO8859-1
..
de_AT.ISO8859-15
..
de_AT.UTF-8
..
de_CH.ISO8859-1
..
de_CH.ISO8859-15
..
de_CH.UTF-8
..
de_DE.ISO8859-1
..
de_DE.ISO8859-15
..
de_DE.UTF-8
..
el_GR.ISO8859-7
..
el_GR.UTF-8
..
en_AU.ISO8859-1
..
en_AU.ISO8859-15
..
en_AU.US-ASCII
..
en_AU.UTF-8
..
en_CA.ISO8859-1
..
en_CA.ISO8859-15
..
en_CA.US-ASCII
..
en_CA.UTF-8
..
en_GB.ISO8859-1
..
en_GB.ISO8859-15
..
en_GB.US-ASCII
..
en_GB.UTF-8
..
en_IE.UTF-8
..
en_NZ.ISO8859-1
..
en_NZ.ISO8859-15
..
en_NZ.US-ASCII
..
en_NZ.UTF-8
..
en_US.ISO8859-1
..
en_US.ISO8859-15
..
en_US.UTF-8
..
es_ES.ISO8859-1
..
es_ES.ISO8859-15
..
es_ES.UTF-8
..
et_EE.ISO8859-15
..
et_EE.UTF-8
..
fi_FI.ISO8859-1
..
fi_FI.ISO8859-15
..
fi_FI.UTF-8
..
fr_BE.ISO8859-1
..
fr_BE.ISO8859-15
..
fr_BE.UTF-8
..
fr_CA.ISO8859-1
..
fr_CA.ISO8859-15
..
fr_CA.UTF-8
..
fr_CH.ISO8859-1
..
fr_CH.ISO8859-15
..
fr_CH.UTF-8
..
fr_FR.ISO8859-1
..
fr_FR.ISO8859-15
..
fr_FR.UTF-8
..
gl_ES.ISO8859-1
..
he_IL.UTF-8
..
hi_IN.ISCII-DEV
..
hr_HR.ISO8859-2
..
hr_HR.UTF-8
..
hu_HU.ISO8859-2
..
hu_HU.UTF-8
..
hy_AM.ARMSCII-8
..
hy_AM.UTF-8
..
is_IS.ISO8859-1
..
is_IS.ISO8859-15
..
is_IS.UTF-8
..
it_CH.ISO8859-1
..
it_CH.ISO8859-15
..
it_CH.UTF-8
..
it_IT.ISO8859-1
..
it_IT.ISO8859-15
..
it_IT.UTF-8
..
ja_JP.SJIS
..
ja_JP.UTF-8
..
ja_JP.eucJP
..
kk_KZ.PT154
..
kk_KZ.UTF-8
..
ko_KR.CP949
..
ko_KR.UTF-8
..
ko_KR.eucKR
..
lt_LT.ISO8859-13
..
lt_LT.UTF-8
..
lv_LV.ISO8859-13
..
lv_LV.UTF-8
..
mn_MN.UTF-8
..
nl_BE.ISO8859-1
..
nl_BE.ISO8859-15
..
nl_BE.UTF-8
..
nl_NL.ISO8859-1
..
nl_NL.ISO8859-15
..
nl_NL.UTF-8
..
no_NO.ISO8859-1
..
no_NO.ISO8859-15
..
no_NO.UTF-8
..
pl_PL.ISO8859-2
..
pl_PL.UTF-8
..
pt_BR.ISO8859-1
..
pt_BR.UTF-8
..
pt_PT.ISO8859-1
..
pt_PT.ISO8859-15
..
pt_PT.UTF-8
..
ro_RO.ISO8859-2
..
ro_RO.UTF-8
..
ru_RU.CP1251
..
ru_RU.CP866
..
ru_RU.ISO8859-5
..
ru_RU.KOI8-R
..
ru_RU.UTF-8
..
sk_SK.ISO8859-2
..
sk_SK.UTF-8
..
sl_SI.ISO8859-2
..
sl_SI.UTF-8
..
sr_YU.ISO8859-2
..
sr_YU.ISO8859-5
..
sr_YU.UTF-8
..
sv_SE.ISO8859-1
..
sv_SE.ISO8859-15
..
sv_SE.UTF-8
..
tr_TR.ISO8859-9
..
tr_TR.UTF-8
..
uk_UA.ISO8859-5
..
uk_UA.KOI8-U
..
uk_UA.UTF-8
..
zh_CN.GB18030
..
zh_CN.GB2312
..
zh_CN.GBK
..
zh_CN.UTF-8
..
zh_CN.eucCN
..
zh_HK.UTF-8
..
zh_TW.UTF-8
..
..
openssl
man
man1
..
man3
..
..
..
pc-sysinstall
backend
..
backend-partmanager
..
backend-query
..
conf
license
..
..
doc
..
..
security
..
sendmail
..
skel
..
snmp
defs
..
mibs
..
..
syscons
fonts
..
keymaps
..
scrnmaps
..
..
tabset
..
vi
catalog
..
..
vt
fonts
..
keymaps
..
..
zoneinfo
Africa
..
America
Argentina
..
Indiana
..
Kentucky
..
North_Dakota
..
..
Antarctica
..
Arctic
..
Asia
..
Atlantic
..
Australia
..
Etc
..
Europe
..
Indian
..
Pacific
..
SystemV
..
..
..
src nochange
..
..
Index: projects/clang700-import/lib/clang/headers/Makefile
===================================================================
--- projects/clang700-import/lib/clang/headers/Makefile (revision 340124)
+++ projects/clang700-import/lib/clang/headers/Makefile (revision 340125)
@@ -1,145 +1,145 @@
# $FreeBSD$
.include "../clang.pre.mk"
.PATH: ${CLANG_SRCS}/lib/Headers
-INCSDIR= ${LIBDIR}/clang/7.0.0/include
+INCSDIR= ${LIBDIR}/clang/7.0.1/include
GENINCS+= arm_fp16.h
GENINCS+= arm_neon.h
INCS+= __clang_cuda_builtin_vars.h
INCS+= __clang_cuda_cmath.h
INCS+= __clang_cuda_complex_builtins.h
INCS+= __clang_cuda_device_functions.h
INCS+= __clang_cuda_intrinsics.h
INCS+= __clang_cuda_libdevice_declares.h
INCS+= __clang_cuda_math_forward_declares.h
INCS+= __clang_cuda_runtime_wrapper.h
INCS+= __stddef_max_align_t.h
INCS+= __wmmintrin_aes.h
INCS+= __wmmintrin_pclmul.h
INCS+= adxintrin.h
INCS+= altivec.h
INCS+= ammintrin.h
INCS+= arm64intr.h
INCS+= arm_acle.h
INCS+= armintr.h
INCS+= avx2intrin.h
INCS+= avx512bitalgintrin.h
INCS+= avx512bwintrin.h
INCS+= avx512cdintrin.h
INCS+= avx512dqintrin.h
INCS+= avx512erintrin.h
INCS+= avx512fintrin.h
INCS+= avx512ifmaintrin.h
INCS+= avx512ifmavlintrin.h
INCS+= avx512pfintrin.h
INCS+= avx512vbmi2intrin.h
INCS+= avx512vbmiintrin.h
INCS+= avx512vbmivlintrin.h
INCS+= avx512vlbitalgintrin.h
INCS+= avx512vlbwintrin.h
INCS+= avx512vlcdintrin.h
INCS+= avx512vldqintrin.h
INCS+= avx512vlintrin.h
INCS+= avx512vlvbmi2intrin.h
INCS+= avx512vlvnniintrin.h
INCS+= avx512vnniintrin.h
INCS+= avx512vpopcntdqintrin.h
INCS+= avx512vpopcntdqvlintrin.h
INCS+= avxintrin.h
INCS+= bmi2intrin.h
INCS+= bmiintrin.h
INCS+= cetintrin.h
INCS+= cldemoteintrin.h
INCS+= clflushoptintrin.h
INCS+= clwbintrin.h
INCS+= clzerointrin.h
INCS+= cpuid.h
INCS+= emmintrin.h
INCS+= f16cintrin.h
INCS+= fma4intrin.h
INCS+= fmaintrin.h
INCS+= fxsrintrin.h
INCS+= gfniintrin.h
INCS+= htmintrin.h
INCS+= htmxlintrin.h
INCS+= ia32intrin.h
INCS+= immintrin.h
INCS+= invpcidintrin.h
INCS+= lwpintrin.h
INCS+= lzcntintrin.h
INCS+= mm3dnow.h
INCS+= mm_malloc.h
INCS+= mmintrin.h
INCS+= module.modulemap
INCS+= movdirintrin.h
INCS+= msa.h
INCS+= mwaitxintrin.h
INCS+= nmmintrin.h
INCS+= opencl-c.h
INCS+= pconfigintrin.h
INCS+= pkuintrin.h
INCS+= pmmintrin.h
INCS+= popcntintrin.h
INCS+= prfchwintrin.h
INCS+= ptwriteintrin.h
INCS+= rdseedintrin.h
INCS+= rtmintrin.h
INCS+= s390intrin.h
INCS+= sgxintrin.h
INCS+= shaintrin.h
INCS+= smmintrin.h
INCS+= tbmintrin.h
INCS+= tmmintrin.h
INCS+= vadefs.h
INCS+= vaesintrin.h
INCS+= vecintrin.h
INCS+= vpclmulqdqintrin.h
INCS+= waitpkgintrin.h
INCS+= wbnoinvdintrin.h
INCS+= wmmintrin.h
INCS+= x86intrin.h
INCS+= xmmintrin.h
INCS+= xopintrin.h
INCS+= xsavecintrin.h
INCS+= xsaveintrin.h
INCS+= xsaveoptintrin.h
INCS+= xsavesintrin.h
INCS+= xtestintrin.h
INCS+= ${GENINCS}
# Headers which may conflict with our own versions (see the note after this block):
.if defined(INSTALL_CONFLICTING_CLANG_HEADERS)
INCS+= float.h
INCS+= intrin.h
INCS+= inttypes.h
INCS+= iso646.h
INCS+= limits.h
INCS+= stdalign.h
INCS+= stdarg.h
INCS+= stdatomic.h
INCS+= stdbool.h
INCS+= stddef.h
INCS+= stdint.h
INCS+= stdnoreturn.h
INCS+= tgmath.h
INCS+= unwind.h
INCS+= varargs.h
.endif
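# Note: a minimal sketch of how the conditional block above might be
# exercised; only defined()-ness of the knob is tested, so either of the
# following would do (installincludes is assumed to be the entry point
# provided by the bsd.*.mk include infrastructure):
#   make -DINSTALL_CONFLICTING_CLANG_HEADERS installincludes
#   env INSTALL_CONFLICTING_CLANG_HEADERS=yes make installincludes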
arm_fp16.h: ${CLANG_SRCS}/include/clang/Basic/arm_fp16.td
${CLANG_TBLGEN} -gen-arm-fp16 \
-I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/$/.d/} \
-o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/arm_fp16.td
arm_neon.h: ${CLANG_SRCS}/include/clang/Basic/arm_neon.td
${CLANG_TBLGEN} -gen-arm-neon \
-I ${CLANG_SRCS}/include/clang/Basic -d ${.TARGET:C/$/.d/} \
-o ${.TARGET} ${CLANG_SRCS}/include/clang/Basic/arm_neon.td
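# The two rules above regenerate arm_fp16.h and arm_neon.h from their
# TableGen (.td) descriptions via ${CLANG_TBLGEN}; the -d flag writes a
# .d dependency file alongside each generated header, and both the
# headers and those .d files are cleaned up through CLEANFILES below.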
CLEANFILES= ${GENINCS} ${GENINCS:C/$/.d/}
.include <bsd.prog.mk>
Index: projects/clang700-import/lib/clang/include/clang/Basic/Version.inc
===================================================================
--- projects/clang700-import/lib/clang/include/clang/Basic/Version.inc (revision 340124)
+++ projects/clang700-import/lib/clang/include/clang/Basic/Version.inc (revision 340125)
@@ -1,11 +1,11 @@
/* $FreeBSD$ */
-#define CLANG_VERSION 7.0.0
-#define CLANG_VERSION_STRING "7.0.0"
+#define CLANG_VERSION 7.0.1
+#define CLANG_VERSION_STRING "7.0.1"
#define CLANG_VERSION_MAJOR 7
#define CLANG_VERSION_MINOR 0
-#define CLANG_VERSION_PATCHLEVEL 0
+#define CLANG_VERSION_PATCHLEVEL 1
#define CLANG_VENDOR "FreeBSD "
-#define SVN_REVISION "342383"
+#define SVN_REVISION "346007"
Index: projects/clang700-import/lib/clang/include/clang/Config/config.h
===================================================================
--- projects/clang700-import/lib/clang/include/clang/Config/config.h (revision 340124)
+++ projects/clang700-import/lib/clang/include/clang/Config/config.h (revision 340125)
@@ -1,84 +1,84 @@
/* $FreeBSD$ */
/* This generated file is for internal use. Do not include it from headers. */
#ifdef CLANG_CONFIG_H
#error config.h can only be included once
#else
#define CLANG_CONFIG_H
/* Bug report URL. */
#define BUG_REPORT_URL "https://bugs.freebsd.org/submit/"
/* Default linker to use. */
#define CLANG_DEFAULT_LINKER ""
/* Default C/ObjC standard to use. */
/* #undef CLANG_DEFAULT_STD_C */
/* Default C++/ObjC++ standard to use. */
/* #undef CLANG_DEFAULT_STD_CXX */
/* Default C++ stdlib to use. */
#define CLANG_DEFAULT_CXX_STDLIB ""
/* Default runtime library to use. */
#define CLANG_DEFAULT_RTLIB ""
/* Default objcopy to use */
#define CLANG_DEFAULT_OBJCOPY "objcopy"
/* Default OpenMP runtime used by -fopenmp. */
#define CLANG_DEFAULT_OPENMP_RUNTIME "libomp"
/* Default architecture for OpenMP offloading to Nvidia GPUs. */
#define CLANG_OPENMP_NVPTX_DEFAULT_ARCH "sm_35"
/* Multilib suffix for libdir. */
#define CLANG_LIBDIR_SUFFIX ""
/* Relative directory for resource files */
#define CLANG_RESOURCE_DIR ""
/* Directories clang will search for headers */
#define C_INCLUDE_DIRS ""
/* Directories clang will search for configuration files */
/* #undef CLANG_CONFIG_FILE_SYSTEM_DIR */
/* #undef CLANG_CONFIG_FILE_USER_DIR */
/* Default <path> to all compiler invocations for --sysroot=<path>. */
/* #undef DEFAULT_SYSROOT */
/* Directory where gcc is installed. */
#define GCC_INSTALL_PREFIX ""
/* Define if we have libxml2 */
/* #undef CLANG_HAVE_LIBXML */
/* Define if we have z3 and want to build it */
/* #undef CLANG_ANALYZER_WITH_Z3 */
/* Define if we have sys/resource.h (rlimits) */
#define CLANG_HAVE_RLIMITS 1
/* The LLVM product name and version */
-#define BACKEND_PACKAGE_STRING "LLVM 7.0.0"
+#define BACKEND_PACKAGE_STRING "LLVM 7.0.1"
/* Linker version detected at compile time. */
/* #undef HOST_LINK_VERSION */
/* pass --build-id to ld */
/* #undef ENABLE_LINKER_BUILD_ID */
/* enable x86 relax relocations by default */
#define ENABLE_X86_RELAX_RELOCATIONS 0
/* Enable the experimental new pass manager by default */
#define ENABLE_EXPERIMENTAL_NEW_PASS_MANAGER 0
/* Enable each functionality of modules */
/* #undef CLANG_ENABLE_ARCMT */
/* #undef CLANG_ENABLE_OBJC_REWRITER */
/* #undef CLANG_ENABLE_STATIC_ANALYZER */
#endif
Index: projects/clang700-import/lib/clang/include/lld/Common/Version.inc
===================================================================
--- projects/clang700-import/lib/clang/include/lld/Common/Version.inc (revision 340124)
+++ projects/clang700-import/lib/clang/include/lld/Common/Version.inc (revision 340125)
@@ -1,10 +1,10 @@
// $FreeBSD$
-#define LLD_VERSION 7.0.0
-#define LLD_VERSION_STRING "7.0.0"
+#define LLD_VERSION 7.0.1
+#define LLD_VERSION_STRING "7.0.1"
#define LLD_VERSION_MAJOR 7
#define LLD_VERSION_MINOR 0
#define LLD_REPOSITORY_STRING "FreeBSD"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
-#define LLD_REVISION_STRING "342383-1300001"
+#define LLD_REVISION_STRING "346007-1300001"
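// For example, the string above reads as: imported from upstream SVN
// revision 346007, with 1300001 as the local identifier in
// __FreeBSD_version style (an illustrative decoding of the scheme
// described in the comment above).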
Index: projects/clang700-import/lib/clang/include/llvm/Config/config.h
===================================================================
--- projects/clang700-import/lib/clang/include/llvm/Config/config.h (revision 340124)
+++ projects/clang700-import/lib/clang/include/llvm/Config/config.h (revision 340125)
@@ -1,347 +1,347 @@
/* $FreeBSD$ */
#ifndef CONFIG_H
#define CONFIG_H
/* Exported configuration */
#include "llvm/Config/llvm-config.h"
/* Bug report URL. */
#define BUG_REPORT_URL "https://bugs.freebsd.org/submit/"
/* Define to 1 to enable backtraces, and to 0 otherwise. */
#define ENABLE_BACKTRACES 0
/* Define to 1 to enable crash overrides, and to 0 otherwise. */
#define ENABLE_CRASH_OVERRIDES 1
/* Define to 1 if you have the `backtrace' function. */
#define HAVE_BACKTRACE TRUE
#define BACKTRACE_HEADER <execinfo.h>
/* Define to 1 if you have the <CrashReporterClient.h> header file. */
/* #undef HAVE_CRASHREPORTERCLIENT_H */
/* can use __crashreporter_info__ */
#define HAVE_CRASHREPORTER_INFO 0
/* Define to 1 if you have the declaration of `arc4random', and to 0 if you
don't. */
#define HAVE_DECL_ARC4RANDOM 1
/* Define to 1 if you have the declaration of `FE_ALL_EXCEPT', and to 0 if you
don't. */
#define HAVE_DECL_FE_ALL_EXCEPT 1
/* Define to 1 if you have the declaration of `FE_INEXACT', and to 0 if you
don't. */
#define HAVE_DECL_FE_INEXACT 1
/* Define to 1 if you have the declaration of `strerror_s', and to 0 if you
don't. */
#define HAVE_DECL_STRERROR_S 0
/* Define to 1 if you have the DIA SDK installed, and to 0 if you don't. */
#define LLVM_ENABLE_DIA_SDK 0
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* Define if dlopen() is available on this platform. */
#define HAVE_DLOPEN 1
/* Define if dladdr() is available on this platform. */
#define HAVE_DLADDR 1
/* Define to 1 if you have the <errno.h> header file. */
#define HAVE_ERRNO_H 1
/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1
/* Define to 1 if you have the <fenv.h> header file. */
#define HAVE_FENV_H 1
/* Define if libffi is available on this platform. */
/* #undef HAVE_FFI_CALL */
/* Define to 1 if you have the <ffi/ffi.h> header file. */
/* #undef HAVE_FFI_FFI_H */
/* Define to 1 if you have the <ffi.h> header file. */
/* #undef HAVE_FFI_H */
/* Define to 1 if you have the `futimens' function. */
#if __FreeBSD__ >= 11
#define HAVE_FUTIMENS 1
#endif
/* Define to 1 if you have the `futimes' function. */
#define HAVE_FUTIMES 1
/* Define to 1 if you have the `getpagesize' function. */
#define HAVE_GETPAGESIZE 1
/* Define to 1 if you have the `getrlimit' function. */
#define HAVE_GETRLIMIT 1
/* Define to 1 if you have the `getrusage' function. */
#define HAVE_GETRUSAGE 1
/* Define to 1 if you have the `isatty' function. */
#define HAVE_ISATTY 1
/* Define to 1 if you have the `edit' library (-ledit). */
#define HAVE_LIBEDIT 1
/* Define to 1 if you have the `pfm' library (-lpfm). */
/* #undef HAVE_LIBPFM */
/* Define to 1 if you have the `psapi' library (-lpsapi). */
/* #undef HAVE_LIBPSAPI */
/* Define to 1 if you have the `pthread' library (-lpthread). */
#define HAVE_LIBPTHREAD 1
/* Define to 1 if you have the `pthread_getname_np' function. */
/* #undef HAVE_PTHREAD_GETNAME_NP */
/* Define to 1 if you have the `pthread_setname_np' function. */
/* #undef HAVE_PTHREAD_SETNAME_NP */
/* Define to 1 if you have the `z' library (-lz). */
#define HAVE_LIBZ 1
/* Define to 1 if you have the <link.h> header file. */
#define HAVE_LINK_H 1
/* Define to 1 if you have the `lseek64' function. */
/* #undef HAVE_LSEEK64 */
/* Define to 1 if you have the <mach/mach.h> header file. */
/* #undef HAVE_MACH_MACH_H */
/* Define to 1 if you have the `mallctl' function. */
#define HAVE_MALLCTL 1
/* Define to 1 if you have the `mallinfo' function. */
/* #undef HAVE_MALLINFO */
/* Define to 1 if you have the <malloc.h> header file. */
/* #undef HAVE_MALLOC_H */
/* Define to 1 if you have the <malloc/malloc.h> header file. */
/* #undef HAVE_MALLOC_MALLOC_H */
/* Define to 1 if you have the `malloc_zone_statistics' function. */
/* #undef HAVE_MALLOC_ZONE_STATISTICS */
/* Define to 1 if you have the `posix_fallocate' function. */
#define HAVE_POSIX_FALLOCATE 1
/* Define to 1 if you have the `posix_spawn' function. */
#define HAVE_POSIX_SPAWN 1
/* Define to 1 if you have the `pread' function. */
#define HAVE_PREAD 1
/* Have pthread_getspecific */
#define HAVE_PTHREAD_GETSPECIFIC 1
/* Define to 1 if you have the <pthread.h> header file. */
#define HAVE_PTHREAD_H 1
/* Have pthread_mutex_lock */
#define HAVE_PTHREAD_MUTEX_LOCK 1
/* Have pthread_rwlock_init */
#define HAVE_PTHREAD_RWLOCK_INIT 1
/* Define to 1 if you have the `realpath' function. */
#define HAVE_REALPATH 1
/* Define to 1 if you have the `sbrk' function. */
#define HAVE_SBRK 1
/* Define to 1 if you have the `setenv' function. */
#define HAVE_SETENV 1
/* Define to 1 if you have the `sched_getaffinity' function. */
/* #undef HAVE_SCHED_GETAFFINITY */
/* Define to 1 if you have the `CPU_COUNT' macro. */
/* #undef HAVE_CPU_COUNT */
/* Define to 1 if you have the `setrlimit' function. */
#define HAVE_SETRLIMIT 1
/* Define to 1 if you have the `sigaltstack' function. */
#define HAVE_SIGALTSTACK 1
/* Define to 1 if you have the <signal.h> header file. */
#define HAVE_SIGNAL_H 1
/* Define to 1 if you have the `strerror' function. */
#define HAVE_STRERROR 1
/* Define to 1 if you have the `strerror_r' function. */
#define HAVE_STRERROR_R 1
/* Define to 1 if you have the `sysconf' function. */
#define HAVE_SYSCONF 1
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#define HAVE_SYS_IOCTL_H 1
/* Define to 1 if you have the <sys/mman.h> header file. */
#define HAVE_SYS_MMAN_H 1
/* Define to 1 if you have the <sys/param.h> header file. */
#define HAVE_SYS_PARAM_H 1
/* Define to 1 if you have the <sys/resource.h> header file. */
#define HAVE_SYS_RESOURCE_H 1
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/time.h> header file. */
#define HAVE_SYS_TIME_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* Define if the setupterm() function is supported on this platform. */
#define HAVE_TERMINFO 1
/* Define if the xar_open() function is supported on this platform. */
/* #undef HAVE_LIBXAR */
/* Define to 1 if you have the <termios.h> header file. */
#define HAVE_TERMIOS_H 1
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* Define to 1 if you have the <valgrind/valgrind.h> header file. */
/* #undef HAVE_VALGRIND_VALGRIND_H */
/* Define to 1 if you have the <zlib.h> header file. */
#define HAVE_ZLIB_H 1
/* Have host's _alloca */
/* #undef HAVE__ALLOCA */
/* Define to 1 if you have the `_chsize_s' function. */
/* #undef HAVE__CHSIZE_S */
/* Define to 1 if you have the `_Unwind_Backtrace' function. */
/* #undef HAVE__UNWIND_BACKTRACE */
/* Have host's __alloca */
/* #undef HAVE___ALLOCA */
/* Have host's __ashldi3 */
/* #undef HAVE___ASHLDI3 */
/* Have host's __ashrdi3 */
/* #undef HAVE___ASHRDI3 */
/* Have host's __chkstk */
/* #undef HAVE___CHKSTK */
/* Have host's __chkstk_ms */
/* #undef HAVE___CHKSTK_MS */
/* Have host's __cmpdi2 */
/* #undef HAVE___CMPDI2 */
/* Have host's __divdi3 */
/* #undef HAVE___DIVDI3 */
/* Have host's __fixdfdi */
/* #undef HAVE___FIXDFDI */
/* Have host's __fixsfdi */
/* #undef HAVE___FIXSFDI */
/* Have host's __floatdidf */
/* #undef HAVE___FLOATDIDF */
/* Have host's __lshrdi3 */
/* #undef HAVE___LSHRDI3 */
/* Have host's __main */
/* #undef HAVE___MAIN */
/* Have host's __moddi3 */
/* #undef HAVE___MODDI3 */
/* Have host's __udivdi3 */
/* #undef HAVE___UDIVDI3 */
/* Have host's __umoddi3 */
/* #undef HAVE___UMODDI3 */
/* Have host's ___chkstk */
/* #undef HAVE____CHKSTK */
/* Have host's ___chkstk_ms */
/* #undef HAVE____CHKSTK_MS */
/* Linker version detected at compile time. */
/* #undef HOST_LINK_VERSION */
/* Target triple LLVM will generate code for by default */
/* Doesn't use `cmakedefine` because it is allowed to be empty. */
/* #undef LLVM_DEFAULT_TARGET_TRIPLE */
/* Define if zlib compression is available */
#define LLVM_ENABLE_ZLIB 1
/* Define if overriding target triple is enabled */
/* #undef LLVM_TARGET_TRIPLE_ENV */
/* LLVM version information */
/* #undef LLVM_VERSION_INFO */
/* Whether tools show host and target info when invoked with --version */
#define LLVM_VERSION_PRINTER_SHOW_HOST_TARGET_INFO 1
/* Define if libxml2 is supported on this platform. */
/* #undef LLVM_LIBXML2_ENABLED */
/* Define to the extension used for shared libraries, say, ".so". */
#define LTDL_SHLIB_EXT ".so"
/* Define to the address where bug reports for this package should be sent. */
#define PACKAGE_BUGREPORT "https://bugs.freebsd.org/submit/"
/* Define to the full name of this package. */
#define PACKAGE_NAME "LLVM"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "LLVM 7.0.0"
+#define PACKAGE_STRING "LLVM 7.0.1"
/* Define to the version of this package. */
-#define PACKAGE_VERSION "7.0.0"
+#define PACKAGE_VERSION "7.0.1"
/* Define to the vendor of this package. */
/* #undef PACKAGE_VENDOR */
/* Define as the return type of signal handlers (`int' or `void'). */
#define RETSIGTYPE void
/* Define to a function implementing stricmp */
/* #undef stricmp */
/* Define to a function implementing strdup */
/* #undef strdup */
/* Whether GlobalISel rule coverage is being collected */
#define LLVM_GISEL_COV_ENABLED 0
/* Define to the default GlobalISel coverage file prefix */
/* #undef LLVM_GISEL_COV_PREFIX */
#endif
Index: projects/clang700-import/lib/clang/include/llvm/Config/llvm-config.h
===================================================================
--- projects/clang700-import/lib/clang/include/llvm/Config/llvm-config.h (revision 340124)
+++ projects/clang700-import/lib/clang/include/llvm/Config/llvm-config.h (revision 340125)
@@ -1,86 +1,86 @@
/* $FreeBSD$ */
/*===------- llvm/Config/llvm-config.h - llvm configuration -------*- C -*-===*/
/* */
/* The LLVM Compiler Infrastructure */
/* */
/* This file is distributed under the University of Illinois Open Source */
/* License. See LICENSE.TXT for details. */
/* */
/*===----------------------------------------------------------------------===*/
/* This file enumerates variables from the LLVM configuration so that they
   can be in exported headers and won't override package-specific directives.
This is a C header that can be included in the llvm-c headers. */
#ifndef LLVM_CONFIG_H
#define LLVM_CONFIG_H
/* Define if LLVM_ENABLE_DUMP is enabled */
/* #undef LLVM_ENABLE_DUMP */
/* Define if we link Polly to the tools */
/* #undef LINK_POLLY_INTO_TOOLS */
/* Target triple LLVM will generate code for by default */
/* #undef LLVM_DEFAULT_TARGET_TRIPLE */
/* Define if threads enabled */
#define LLVM_ENABLE_THREADS 1
/* Has gcc/MSVC atomic intrinsics */
#define LLVM_HAS_ATOMICS 1
/* Host triple LLVM will be executed on */
/* #undef LLVM_HOST_TRIPLE */
/* LLVM architecture name for the native architecture, if available */
/* #undef LLVM_NATIVE_ARCH */
/* LLVM name for the native AsmParser init function, if available */
/* #undef LLVM_NATIVE_ASMPARSER */
/* LLVM name for the native AsmPrinter init function, if available */
/* #undef LLVM_NATIVE_ASMPRINTER */
/* LLVM name for the native Disassembler init function, if available */
/* #undef LLVM_NATIVE_DISASSEMBLER */
/* LLVM name for the native Target init function, if available */
/* #undef LLVM_NATIVE_TARGET */
/* LLVM name for the native TargetInfo init function, if available */
/* #undef LLVM_NATIVE_TARGETINFO */
/* LLVM name for the native target MC init function, if available */
/* #undef LLVM_NATIVE_TARGETMC */
/* Define if this is Unixish platform */
#define LLVM_ON_UNIX 1
/* Define if we have the Intel JIT API runtime support library */
#define LLVM_USE_INTEL_JITEVENTS 0
/* Define if we have the oprofile JIT-support library */
#define LLVM_USE_OPROFILE 0
/* Define if we have the perf JIT-support library */
#define LLVM_USE_PERF 0
/* Major version of the LLVM API */
#define LLVM_VERSION_MAJOR 7
/* Minor version of the LLVM API */
#define LLVM_VERSION_MINOR 0
/* Patch version of the LLVM API */
#define LLVM_VERSION_PATCH 0
/* LLVM version string */
-#define LLVM_VERSION_STRING "7.0.0"
+#define LLVM_VERSION_STRING "7.0.1"
/* Whether LLVM records statistics for use with GetStatistics(),
* PrintStatistics() or PrintStatisticsJSON()
*/
#define LLVM_FORCE_ENABLE_STATS 0
#endif
Index: projects/clang700-import/lib/clang/include/llvm/Support/VCSRevision.h
===================================================================
--- projects/clang700-import/lib/clang/include/llvm/Support/VCSRevision.h (revision 340124)
+++ projects/clang700-import/lib/clang/include/llvm/Support/VCSRevision.h (revision 340125)
@@ -1,2 +1,2 @@
/* $FreeBSD$ */
-#define LLVM_REVISION "svn-r342383"
+#define LLVM_REVISION "svn-r346007"
Index: projects/clang700-import/lib/libclang_rt/Makefile.inc
===================================================================
--- projects/clang700-import/lib/libclang_rt/Makefile.inc (revision 340124)
+++ projects/clang700-import/lib/libclang_rt/Makefile.inc (revision 340125)
@@ -1,44 +1,44 @@
# $FreeBSD$
.include <bsd.compiler.mk>
# armv[67] is a bit special since we allow a soft-float version via
# CPUTYPE matching *soft*. This variant may not actually work, though.
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \
(!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "")
CRTARCH?= armhf
.else
CRTARCH?= ${MACHINE_CPUARCH:C/amd64/x86_64/}
.endif
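# A worked example of the selection above (illustrative): on amd64,
# CRTARCH becomes x86_64 through the :C/amd64/x86_64/ substitution; on
# hard-float armv7 (CPUTYPE unset or not matching *soft*), CRTARCH is
# armhf. In both cases the runtimes install under the LIBDIR set
# below, i.e. ${CLANGDIR}/lib/freebsd.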
CRTSRC= ${SRCTOP}/contrib/compiler-rt
.PATH: ${CRTSRC}/lib
-CLANGDIR= /usr/lib/clang/7.0.0
+CLANGDIR= /usr/lib/clang/7.0.1
LIBDIR= ${CLANGDIR}/lib/freebsd
SHLIBDIR= ${LIBDIR}
NO_PIC=
MK_PROFILE= no
WARNS?= 0
SSP_CFLAGS=
CFLAGS+= -DNDEBUG
CFLAGS+= -DHAVE_RPC_XDR_H=0
CFLAGS+= -DHAVE_TIRPC_RPC_XDR_H=0
CFLAGS+= -DSANITIZER_SUPPORTS_WEAK_HOOKS=0
CFLAGS+= -DUBSAN_CAN_USE_CXXABI
CFLAGS+= ${PICFLAG}
CFLAGS+= -fno-builtin
CFLAGS+= -fno-exceptions
CXXFLAGS+= -fno-rtti
.if ${COMPILER_TYPE} == clang && ${COMPILER_VERSION} >= 30700
CFLAGS+= -fno-sanitize=safe-stack
.endif
CFLAGS+= -fno-stack-protector
CFLAGS+= -funwind-tables
CXXFLAGS+= -fvisibility-inlines-hidden
CXXFLAGS+= -fvisibility=hidden
CFLAGS+= -I${CRTSRC}/lib
CXXFLAGS+= -std=c++11
Index: projects/clang700-import/tools/build/mk/OptionalObsoleteFiles.inc
===================================================================
--- projects/clang700-import/tools/build/mk/OptionalObsoleteFiles.inc (revision 340124)
+++ projects/clang700-import/tools/build/mk/OptionalObsoleteFiles.inc (revision 340125)
@@ -1,9872 +1,9872 @@
#
# $FreeBSD$
#
# This file adds support for the WITHOUT_* and WITH_* knobs in src.conf(5) to
# the check-old and delete-old* targets.
#
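# A minimal usage sketch (illustrative; see src.conf(5) and the
# check-old/delete-old* targets named above): putting
#   WITHOUT_ACCT=yes
# in /etc/src.conf sets MK_ACCT to "no", so the first block below adds
# the accounting files to OLD_FILES, after which
#   make check-old
#   make delete-old
# will report and remove them (delete-old-libs handles OLD_LIBS).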
.if ${MK_ACCT} == no
OLD_FILES+=etc/rc.d/accounting
OLD_FILES+=etc/periodic/daily/310.accounting
OLD_FILES+=usr/sbin/accton
OLD_FILES+=usr/sbin/sa
OLD_FILES+=usr/share/man/man8/accton.8.gz
OLD_FILES+=usr/share/man/man8/sa.8.gz
OLD_FILES+=usr/tests/usr.sbin/sa/Kyuafile
OLD_FILES+=usr/tests/usr.sbin/sa/legacy_test
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-sav.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-amd64-usr.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-sav.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-i386-usr.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-sav.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v1-sparc64-usr.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-amd64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-amd64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-amd64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-i386-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-i386-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-i386-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-sav.in
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-u.out
OLD_FILES+=usr/tests/usr.sbin/sa/v2-sparc64-usr.in
OLD_FILES+=usr/tests/usr.sbin/sa
.endif
.if ${MK_ACPI} == no
OLD_FILES+=etc/devd/asus.conf
OLD_FILES+=etc/rc.d/power_profile
OLD_FILES+=usr/sbin/acpiconf
OLD_FILES+=usr/sbin/acpidb
OLD_FILES+=usr/sbin/acpidump
OLD_FILES+=usr/sbin/iasl
OLD_FILES+=usr/share/man/man8/acpiconf.8.gz
OLD_FILES+=usr/share/man/man8/acpidb.8.gz
OLD_FILES+=usr/share/man/man8/acpidump.8.gz
OLD_FILES+=usr/share/man/man8/iasl.8.gz
.endif
.if ${MK_AMD} == no
OLD_FILES+=etc/amd.map
OLD_FILES+=etc/newsyslog.conf.d/amd.conf
OLD_FILES+=etc/rc.d/amd
OLD_FILES+=usr/bin/pawd
OLD_FILES+=usr/sbin/amd
OLD_FILES+=usr/sbin/amq
OLD_FILES+=usr/sbin/fixmount
OLD_FILES+=usr/sbin/fsinfo
OLD_FILES+=usr/sbin/hlfsd
OLD_FILES+=usr/sbin/mk-amd-map
OLD_FILES+=usr/sbin/wire-test
OLD_FILES+=usr/share/examples/etc/amd.map
OLD_FILES+=usr/share/man/man1/pawd.1.gz
OLD_FILES+=usr/share/man/man5/amd.conf.5.gz
OLD_FILES+=usr/share/man/man8/amd.8.gz
OLD_FILES+=usr/share/man/man8/amq.8.gz
OLD_FILES+=usr/share/man/man8/fixmount.8.gz
OLD_FILES+=usr/share/man/man8/fsinfo.8.gz
OLD_FILES+=usr/share/man/man8/hlfsd.8.gz
OLD_FILES+=usr/share/man/man8/mk-amd-map.8.gz
OLD_FILES+=usr/share/man/man8/wire-test.8.gz
.endif
.if ${MK_APM} == no
OLD_FILES+=etc/rc.d/apm
OLD_FILES+=etc/rc.d/apmd
OLD_FILES+=etc/apmd.conf
OLD_FILES+=usr/sbin/apm
OLD_FILES+=usr/share/examples/etc/apmd.conf
OLD_FILES+=usr/share/man/man8/amd64/apm.8.gz
OLD_FILES+=usr/share/man/man8/amd64/apmconf.8.gz
.endif
.if ${MK_AT} == no
OLD_FILES+=etc/pam.d/atrun
OLD_FILES+=usr/bin/at
OLD_FILES+=usr/bin/atq
OLD_FILES+=usr/bin/atrm
OLD_FILES+=usr/bin/batch
OLD_FILES+=usr/libexec/atrun
OLD_FILES+=usr/share/man/man1/at.1.gz
OLD_FILES+=usr/share/man/man1/atq.1.gz
OLD_FILES+=usr/share/man/man1/atrm.1.gz
OLD_FILES+=usr/share/man/man1/batch.1.gz
OLD_FILES+=usr/share/man/man8/atrun.8.gz
.endif
.if ${MK_ATM} == no
OLD_FILES+=usr/bin/sscop
OLD_FILES+=usr/include/netnatm/addr.h
OLD_FILES+=usr/include/netnatm/api/atmapi.h
OLD_FILES+=usr/include/netnatm/api/ccatm.h
OLD_FILES+=usr/include/netnatm/api/unisap.h
OLD_DIRS+=usr/include/netnatm/api
OLD_FILES+=usr/include/netnatm/msg/uni_config.h
OLD_FILES+=usr/include/netnatm/msg/uni_hdr.h
OLD_FILES+=usr/include/netnatm/msg/uni_ie.h
OLD_FILES+=usr/include/netnatm/msg/uni_msg.h
OLD_FILES+=usr/include/netnatm/msg/unimsglib.h
OLD_FILES+=usr/include/netnatm/msg/uniprint.h
OLD_FILES+=usr/include/netnatm/msg/unistruct.h
OLD_DIRS+=usr/include/netnatm/msg
OLD_FILES+=usr/include/netnatm/saal/sscfu.h
OLD_FILES+=usr/include/netnatm/saal/sscfudef.h
OLD_FILES+=usr/include/netnatm/saal/sscop.h
OLD_FILES+=usr/include/netnatm/saal/sscopdef.h
OLD_DIRS+=usr/include/netnatm/saal
OLD_FILES+=usr/include/netnatm/sig/uni.h
OLD_FILES+=usr/include/netnatm/sig/unidef.h
OLD_FILES+=usr/include/netnatm/sig/unisig.h
OLD_DIRS+=usr/include/netnatm/sig
OLD_FILES+=usr/include/netnatm/unimsg.h
OLD_FILES+=usr/lib/libngatm.a
OLD_FILES+=usr/lib/libngatm.so
OLD_LIBS+=usr/lib/libngatm.so.4
OLD_FILES+=usr/lib/libngatm_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libngatm.a
OLD_FILES+=usr/lib32/libngatm.so
OLD_LIBS+=usr/lib32/libngatm.so.4
OLD_FILES+=usr/lib32/libngatm_p.a
.endif
OLD_FILES+=usr/share/man/man1/sscop.1.gz
OLD_FILES+=usr/share/man/man3/libngatm.3.gz
OLD_FILES+=usr/share/man/man3/uniaddr.3.gz
OLD_FILES+=usr/share/man/man3/unifunc.3.gz
OLD_FILES+=usr/share/man/man3/unimsg.3.gz
OLD_FILES+=usr/share/man/man3/unisap.3.gz
OLD_FILES+=usr/share/man/man3/unistruct.3.gz
.endif
.if ${MK_AUDIT} == no
OLD_FILES+=usr/sbin/audit
OLD_FILES+=usr/sbin/auditd
OLD_FILES+=usr/sbin/auditdistd
OLD_FILES+=usr/sbin/auditreduce
OLD_FILES+=usr/sbin/praudit
OLD_FILES+=usr/share/man/man1/auditreduce.1.gz
OLD_FILES+=usr/share/man/man1/praudit.1.gz
OLD_FILES+=usr/share/man/man5/auditdistd.conf.5.gz
OLD_FILES+=usr/share/man/man8/audit.8.gz
OLD_FILES+=usr/share/man/man8/auditd.8.gz
OLD_FILES+=usr/share/man/man8/auditdistd.8.gz
.endif
.if ${MK_AUTHPF} == no
OLD_FILES+=usr/sbin/authpf
OLD_FILES+=usr/sbin/authpf-noip
OLD_FILES+=usr/share/man/man8/authpf.8.gz
OLD_FILES+=usr/share/man/man8/authpf-noip.8.gz
.endif
.if ${MK_AUTOFS} == no
OLD_FILES+=etc/autofs/include_ldap
OLD_FILES+=etc/autofs/special_hosts
OLD_FILES+=etc/autofs/special_media
OLD_FILES+=etc/autofs/special_noauto
OLD_FILES+=etc/autofs/special_null
OLD_FILES+=etc/auto_master
OLD_FILES+=etc/rc.d/automount
OLD_FILES+=etc/rc.d/automountd
OLD_FILES+=etc/rc.d/autounmountd
OLD_FILES+=usr/sbin/automount
OLD_FILES+=usr/sbin/automountd
OLD_FILES+=usr/sbin/autounmountd
OLD_FILES+=usr/share/man/man5/autofs.5.gz
OLD_FILES+=usr/share/man/man5/auto_master.5.gz
OLD_FILES+=usr/share/man/man8/automount.8.gz
OLD_FILES+=usr/share/man/man8/automountd.8.gz
OLD_FILES+=usr/share/man/man8/autounmountd.8.gz
OLD_DIRS+=etc/autofs
.endif
.if ${MK_BHYVE} == no
OLD_FILES+=usr/lib/libvmmapi.a
OLD_FILES+=usr/lib/libvmmapi.so
OLD_LIBS+=usr/lib/libvmmapi.so.5
OLD_FILES+=usr/include/vmmapi.h
OLD_FILES+=usr/sbin/bhyve
OLD_FILES+=usr/sbin/bhyvectl
OLD_FILES+=usr/sbin/bhyveload
OLD_FILES+=usr/share/examples/bhyve/vmrun.sh
OLD_FILES+=usr/share/man/man8/bhyve.8.gz
OLD_FILES+=usr/share/man/man8/bhyveload.8.gz
OLD_DIRS+=usr/share/examples/bhyve
.endif
.if ${MK_BINUTILS} == no
OLD_FILES+=usr/bin/as
.if ${MK_LLD_IS_LD} == no
OLD_FILES+=usr/bin/ld
OLD_FILES+=usr/share/man/man1/ld.1.gz
.endif
OLD_FILES+=usr/bin/ld.bfd
OLD_FILES+=usr/bin/objdump
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/armelf_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/armelfb_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.x
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xd
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xn
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xr
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xs
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xu
OLD_FILES+=usr/libdata/ldscripts/elf32_sparc.xw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32btsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32btsmipn32_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ltsmipn32_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf32ppc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.x
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xd
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xr
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xs
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xu
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc.xw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64_sparc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64btsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64ltsmip_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf64ppc_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf_i386_fbsd.xw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.x
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xbn
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xd
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xdc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xdw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xn
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xr
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xs
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xsc
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xsw
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xu
OLD_FILES+=usr/libdata/ldscripts/elf_x86_64_fbsd.xw
OLD_FILES+=usr/share/man/man1/as.1.gz
OLD_FILES+=usr/share/man/man1/objdump.1.gz
OLD_FILES+=usr/share/man/man7/as.7.gz
OLD_FILES+=usr/share/man/man7/ld.7.gz
OLD_FILES+=usr/share/man/man7/ldint.7.gz
OLD_FILES+=usr/share/man/man7/binutils.7.gz
.endif
.if ${MK_BLACKLIST} == no
OLD_FILES+=etc/rc.d/blacklistd
OLD_FILES+=usr/include/blacklist.h
OLD_FILES+=usr/lib/libblacklist.a
OLD_FILES+=usr/lib/libblacklist_p.a
OLD_FILES+=usr/lib/libblacklist.so
OLD_LIBS+=usr/lib/libblacklist.so.0
OLD_FILES+=usr/libexec/blacklistd-helper
OLD_FILES+=usr/sbin/blacklistctl
OLD_FILES+=usr/sbin/blacklistd
OLD_FILES+=usr/share/man/man3/blacklist.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_close.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_open.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_r.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_sa.3.gz
OLD_FILES+=usr/share/man/man3/blacklist_sa_r.3.gz
OLD_FILES+=usr/share/man/man5/blacklistd.conf.5.gz
OLD_FILES+=usr/share/man/man8/blacklistctl.8.gz
OLD_FILES+=usr/share/man/man8/blacklistd.8.gz
.endif
.if ${MK_BLUETOOTH} == no
OLD_FILES+=etc/bluetooth/hcsecd.conf
OLD_FILES+=etc/bluetooth/hosts
OLD_FILES+=etc/bluetooth/protocols
OLD_FILES+=etc/defaults/bluetooth.device.conf
OLD_DIRS+=etc/bluetooth
OLD_FILES+=etc/rc.d/bluetooth
OLD_FILES+=etc/rc.d/bthidd
OLD_FILES+=etc/rc.d/hcsecd
OLD_FILES+=etc/rc.d/rfcomm_pppd_server
OLD_FILES+=etc/rc.d/sdpd
OLD_FILES+=etc/rc.d/ubthidhci
OLD_FILES+=usr/bin/bthost
OLD_FILES+=usr/bin/btsockstat
OLD_FILES+=usr/bin/rfcomm_sppd
OLD_FILES+=usr/include/bluetooth.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_bluetooth.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_bt3c.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_hci_raw.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_l2cap.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_rfcomm.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_btsocket_sco.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_h4.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_hci.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_l2cap.h
OLD_FILES+=usr/include/netgraph/bluetooth/include/ng_ubt.h
OLD_DIRS+=usr/include/netgraph/bluetooth/include
OLD_DIRS+=usr/include/netgraph/bluetooth
OLD_FILES+=usr/include/sdp.h
OLD_FILES+=usr/lib/libbluetooth.a
OLD_FILES+=usr/lib/libbluetooth.so
OLD_LIBS+=usr/lib/libbluetooth.so.4
OLD_FILES+=usr/lib/libbluetooth_p.a
OLD_FILES+=usr/lib/libsdp.a
OLD_FILES+=usr/lib/libsdp.so
OLD_LIBS+=usr/lib/libsdp.so.4
OLD_FILES+=usr/lib/libsdp_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libbluetooth.a
OLD_FILES+=usr/lib32/libbluetooth.so
OLD_LIBS+=usr/lib32/libbluetooth.so.4
OLD_FILES+=usr/lib32/libbluetooth_p.a
OLD_FILES+=usr/lib32/libsdp.a
OLD_FILES+=usr/lib32/libsdp.so
OLD_LIBS+=usr/lib32/libsdp.so.4
OLD_FILES+=usr/lib32/libsdp_p.a
.endif
OLD_FILES+=usr/sbin/ath3kfw
OLD_FILES+=usr/sbin/bcmfw
OLD_FILES+=usr/sbin/bt3cfw
OLD_FILES+=usr/sbin/bthidcontrol
OLD_FILES+=usr/sbin/bthidd
OLD_FILES+=usr/sbin/btpand
OLD_FILES+=usr/sbin/hccontrol
OLD_FILES+=usr/sbin/hcsecd
OLD_FILES+=usr/sbin/hcseriald
OLD_FILES+=usr/sbin/l2control
OLD_FILES+=usr/sbin/l2ping
OLD_FILES+=usr/sbin/rfcomm_pppd
OLD_FILES+=usr/sbin/sdpcontrol
OLD_FILES+=usr/sbin/sdpd
OLD_FILES+=usr/share/examples/etc/defaults/bluetooth.device.conf
OLD_FILES+=usr/share/man/man1/bthost.1.gz
OLD_FILES+=usr/share/man/man1/btsockstat.1.gz
OLD_FILES+=usr/share/man/man1/rfcomm_sppd.1.gz
OLD_FILES+=usr/share/man/man3/SDP_GET128.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET16.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET32.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET64.3.gz
OLD_FILES+=usr/share/man/man3/SDP_GET8.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT128.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT16.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT32.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT64.3.gz
OLD_FILES+=usr/share/man/man3/SDP_PUT8.3.gz
OLD_FILES+=usr/share/man/man3/bdaddr_any.3.gz
OLD_FILES+=usr/share/man/man3/bdaddr_copy.3.gz
OLD_FILES+=usr/share/man/man3/bdaddr_same.3.gz
OLD_FILES+=usr/share/man/man3/bluetooth.3.gz
OLD_FILES+=usr/share/man/man3/bt_aton.3.gz
OLD_FILES+=usr/share/man/man3/bt_devaddr.3.gz
OLD_FILES+=usr/share/man/man3/bt_devclose.3.gz
OLD_FILES+=usr/share/man/man3/bt_devenum.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_clr.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_set.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_evt_tst.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_clr.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_set.3.gz
OLD_FILES+=usr/share/man/man3/bt_devfilter_pkt_tst.3.gz
OLD_FILES+=usr/share/man/man3/bt_devinfo.3.gz
OLD_FILES+=usr/share/man/man3/bt_devinquiry.3.gz
OLD_FILES+=usr/share/man/man3/bt_devname.3.gz
OLD_FILES+=usr/share/man/man3/bt_devopen.3.gz
OLD_FILES+=usr/share/man/man3/bt_devreq.3.gz
OLD_FILES+=usr/share/man/man3/bt_devsend.3.gz
OLD_FILES+=usr/share/man/man3/bt_endhostent.3.gz
OLD_FILES+=usr/share/man/man3/bt_endprotoent.3.gz
OLD_FILES+=usr/share/man/man3/bt_gethostbyaddr.3.gz
OLD_FILES+=usr/share/man/man3/bt_gethostbyname.3.gz
OLD_FILES+=usr/share/man/man3/bt_gethostent.3.gz
OLD_FILES+=usr/share/man/man3/bt_getprotobyname.3.gz
OLD_FILES+=usr/share/man/man3/bt_getprotobynumber.3.gz
OLD_FILES+=usr/share/man/man3/bt_getprotoent.3.gz
OLD_FILES+=usr/share/man/man3/bt_ntoa.3.gz
OLD_FILES+=usr/share/man/man3/bt_sethostent.3.gz
OLD_FILES+=usr/share/man/man3/bt_setprotoent.3.gz
OLD_FILES+=usr/share/man/man3/sdp.3.gz
OLD_FILES+=usr/share/man/man3/sdp_attr2desc.3.gz
OLD_FILES+=usr/share/man/man3/sdp_change_service.3.gz
OLD_FILES+=usr/share/man/man3/sdp_close.3.gz
OLD_FILES+=usr/share/man/man3/sdp_error.3.gz
OLD_FILES+=usr/share/man/man3/sdp_open.3.gz
OLD_FILES+=usr/share/man/man3/sdp_open_local.3.gz
OLD_FILES+=usr/share/man/man3/sdp_register_service.3.gz
OLD_FILES+=usr/share/man/man3/sdp_search.3.gz
OLD_FILES+=usr/share/man/man3/sdp_unregister_service.3.gz
OLD_FILES+=usr/share/man/man3/sdp_uuid2desc.3.gz
OLD_FILES+=usr/share/man/man4/ng_bluetooth.4.gz
OLD_FILES+=usr/share/man/man5/bluetooth.device.conf.5.gz
OLD_FILES+=usr/share/man/man5/bluetooth.hosts.5.gz
OLD_FILES+=usr/share/man/man5/bluetooth.protocols.5.gz
OLD_FILES+=usr/share/man/man5/hcsecd.conf.5.gz
OLD_FILES+=usr/share/man/man8/ath3kfw.8.gz
OLD_FILES+=usr/share/man/man8/bcmfw.8.gz
OLD_FILES+=usr/share/man/man8/bt3cfw.8.gz
OLD_FILES+=usr/share/man/man8/bthidcontrol.8.gz
OLD_FILES+=usr/share/man/man8/bthidd.8.gz
OLD_FILES+=usr/share/man/man8/btpand.8.gz
OLD_FILES+=usr/share/man/man8/hccontrol.8.gz
OLD_FILES+=usr/share/man/man8/hcsecd.8.gz
OLD_FILES+=usr/share/man/man8/hcseriald.8.gz
OLD_FILES+=usr/share/man/man8/l2control.8.gz
OLD_FILES+=usr/share/man/man8/l2ping.8.gz
OLD_FILES+=usr/share/man/man8/rfcomm_pppd.8.gz
OLD_FILES+=usr/share/man/man8/sdpcontrol.8.gz
OLD_FILES+=usr/share/man/man8/sdpd.8.gz
.endif
.if ${MK_BOOT} == no
OLD_FILES+=boot/beastie.4th
OLD_FILES+=boot/boot
OLD_FILES+=boot/boot0
OLD_FILES+=boot/boot0sio
OLD_FILES+=boot/boot1
OLD_FILES+=boot/boot1.efi
OLD_FILES+=boot/boot1.efifat
OLD_FILES+=boot/boot2
OLD_FILES+=boot/brand.4th
OLD_FILES+=boot/cdboot
OLD_FILES+=boot/check-password.4th
OLD_FILES+=boot/color.4th
OLD_FILES+=boot/defaults/loader.conf
OLD_FILES+=boot/delay.4th
OLD_FILES+=boot/device.hints
OLD_FILES+=boot/frames.4th
OLD_FILES+=boot/gptboot
OLD_FILES+=boot/gptzfsboot
OLD_FILES+=boot/loader
OLD_FILES+=boot/loader.4th
OLD_FILES+=boot/loader.efi
OLD_FILES+=boot/loader.help
OLD_FILES+=boot/loader.rc
OLD_FILES+=boot/mbr
OLD_FILES+=boot/menu-commands.4th
OLD_FILES+=boot/menu.4th
OLD_FILES+=boot/menu.rc
OLD_FILES+=boot/menusets.4th
OLD_FILES+=boot/pcibios.4th
OLD_FILES+=boot/pmbr
OLD_FILES+=boot/pxeboot
OLD_FILES+=boot/screen.4th
OLD_FILES+=boot/shortcuts.4th
OLD_FILES+=boot/support.4th
OLD_FILES+=boot/userboot.so
OLD_FILES+=boot/version.4th
OLD_FILES+=boot/zfsboot
OLD_FILES+=boot/zfsloader
OLD_FILES+=usr/lib/kgzldr.o
OLD_FILES+=usr/share/man/man5/loader.conf.5.gz
OLD_FILES+=usr/share/man/man8/beastie.4th.8.gz
OLD_FILES+=usr/share/man/man8/brand.4th.8.gz
OLD_FILES+=usr/share/man/man8/check-password.4th.8.gz
OLD_FILES+=usr/share/man/man8/color.4th.8.gz
OLD_FILES+=usr/share/man/man8/delay.4th.8.gz
OLD_FILES+=usr/share/man/man8/gptboot.8.gz
OLD_FILES+=usr/share/man/man8/gptzfsboot.8.gz
OLD_FILES+=usr/share/man/man8/loader.4th.8.gz
OLD_FILES+=usr/share/man/man8/loader.8.gz
OLD_FILES+=usr/share/man/man8/menu.4th.8.gz
OLD_FILES+=usr/share/man/man8/menusets.4th.8.gz
OLD_FILES+=usr/share/man/man8/pxeboot.8.gz
OLD_FILES+=usr/share/man/man8/version.4th.8.gz
OLD_FILES+=usr/share/man/man8/zfsboot.8.gz
OLD_FILES+=usr/share/man/man8/zfsloader.8.gz
.endif
.if ${MK_BOOTPARAMD} == no
OLD_FILES+=usr/sbin/bootparamd
OLD_FILES+=usr/share/man/man5/bootparams.5.gz
OLD_FILES+=usr/share/man/man8/bootparamd.8.gz
OLD_FILES+=usr/sbin/callbootd
.endif
.if ${MK_BOOTPD} == no
OLD_FILES+=usr/libexec/bootpd
OLD_FILES+=usr/share/man/man5/bootptab.5.gz
OLD_FILES+=usr/share/man/man8/bootpd.8.gz
OLD_FILES+=usr/libexec/bootpgw
OLD_FILES+=usr/sbin/bootpef
OLD_FILES+=usr/share/man/man8/bootpef.8.gz
OLD_FILES+=usr/sbin/bootptest
OLD_FILES+=usr/share/man/man8/bootptest.8.gz
.endif
.if ${MK_BSD_CPIO} == no
OLD_FILES+=usr/bin/bsdcpio
OLD_FILES+=usr/bin/cpio
OLD_FILES+=usr/share/man/man1/bsdcpio.1.gz
OLD_FILES+=usr/share/man/man1/cpio.1.gz
.endif
.if ${MK_BSDINSTALL} == no
OLD_FILES+=usr/libexec/bsdinstall/adduser
OLD_FILES+=usr/libexec/bsdinstall/auto
OLD_FILES+=usr/libexec/bsdinstall/autopart
OLD_FILES+=usr/libexec/bsdinstall/bootconfig
OLD_FILES+=usr/libexec/bsdinstall/checksum
OLD_FILES+=usr/libexec/bsdinstall/config
OLD_FILES+=usr/libexec/bsdinstall/distextract
OLD_FILES+=usr/libexec/bsdinstall/distfetch
OLD_FILES+=usr/libexec/bsdinstall/docsinstall
OLD_FILES+=usr/libexec/bsdinstall/entropy
OLD_FILES+=usr/libexec/bsdinstall/hardening
OLD_FILES+=usr/libexec/bsdinstall/hostname
OLD_FILES+=usr/libexec/bsdinstall/jail
OLD_FILES+=usr/libexec/bsdinstall/keymap
OLD_FILES+=usr/libexec/bsdinstall/mirrorselect
OLD_FILES+=usr/libexec/bsdinstall/mount
OLD_FILES+=usr/libexec/bsdinstall/netconfig
OLD_FILES+=usr/libexec/bsdinstall/netconfig_ipv4
OLD_FILES+=usr/libexec/bsdinstall/netconfig_ipv6
OLD_FILES+=usr/libexec/bsdinstall/partedit
OLD_FILES+=usr/libexec/bsdinstall/rootpass
OLD_FILES+=usr/libexec/bsdinstall/script
OLD_FILES+=usr/libexec/bsdinstall/scriptedpart
OLD_FILES+=usr/libexec/bsdinstall/services
OLD_FILES+=usr/libexec/bsdinstall/time
OLD_FILES+=usr/libexec/bsdinstall/umount
OLD_FILES+=usr/libexec/bsdinstall/wlanconfig
OLD_FILES+=usr/libexec/bsdinstall/zfsboot
OLD_FILES+=usr/sbin/bsdinstall
OLD_FILES+=usr/share/man/man8/bsdinstall.8.gz
OLD_FILES+=usr/share/man/man8/sade.8.gz
OLD_DIRS+=usr/libexec/bsdinstall
.endif
.if ${MK_BSNMP} == no
OLD_FILES+=etc/snmpd.config
OLD_FILES+=etc/rc.d/bsnmpd
OLD_FILES+=usr/bin/bsnmpget
OLD_FILES+=usr/bin/bsnmpset
OLD_FILES+=usr/bin/bsnmpwalk
OLD_FILES+=usr/include/bsnmp/asn1.h
OLD_FILES+=usr/include/bsnmp/bridge_snmp.h
OLD_FILES+=usr/include/bsnmp/snmp.h
OLD_FILES+=usr/include/bsnmp/snmp_mibII.h
OLD_FILES+=usr/include/bsnmp/snmp_netgraph.h
OLD_FILES+=usr/include/bsnmp/snmpagent.h
OLD_FILES+=usr/include/bsnmp/snmpclient.h
OLD_FILES+=usr/include/bsnmp/snmpmod.h
OLD_FILES+=usr/lib/libbsnmp.a
OLD_FILES+=usr/lib/libbsnmp.so
OLD_LIBS+=usr/lib/libbsnmp.so.6
OLD_FILES+=usr/lib/libbsnmp_p.a
OLD_FILES+=usr/lib/libbsnmptools.a
OLD_FILES+=usr/lib/libbsnmptools.so
OLD_LIBS+=usr/lib/libbsnmptools.so.0
OLD_FILES+=usr/lib/libbsnmptools_p.a
OLD_FILES+=usr/lib/snmp_bridge.so
OLD_LIBS+=usr/lib/snmp_bridge.so.6
OLD_FILES+=usr/lib/snmp_hast.so
OLD_LIBS+=usr/lib/snmp_hast.so.6
OLD_FILES+=usr/lib/snmp_hostres.so
OLD_LIBS+=usr/lib/snmp_hostres.so.6
OLD_FILES+=usr/lib/snmp_lm75.so
OLD_LIBS+=usr/lib/snmp_lm75.so.6
OLD_FILES+=usr/lib/snmp_mibII.so
OLD_LIBS+=usr/lib/snmp_mibII.so.6
OLD_FILES+=usr/lib/snmp_netgraph.so
OLD_LIBS+=usr/lib/snmp_netgraph.so.6
OLD_FILES+=usr/lib/snmp_pf.so
OLD_LIBS+=usr/lib/snmp_pf.so.6
OLD_FILES+=usr/lib/snmp_target.so
OLD_LIBS+=usr/lib/snmp_target.so.6
OLD_FILES+=usr/lib/snmp_usm.so
OLD_LIBS+=usr/lib/snmp_usm.so.6
OLD_FILES+=usr/lib/snmp_vacm.so
OLD_LIBS+=usr/lib/snmp_vacm.so.6
OLD_FILES+=usr/lib/snmp_wlan.so
OLD_LIBS+=usr/lib/snmp_wlan.so.6
OLD_FILES+=usr/lib32/libbsnmp.a
OLD_FILES+=usr/lib32/libbsnmp.so
OLD_LIBS+=usr/lib32/libbsnmp.so.6
OLD_FILES+=usr/lib32/libbsnmp_p.a
OLD_FILES+=usr/sbin/bsnmpd
OLD_FILES+=usr/sbin/gensnmptree
OLD_FILES+=usr/share/examples/etc/snmpd.config
OLD_FILES+=usr/share/man/man1/bsnmpd.1.gz
OLD_FILES+=usr/share/man/man1/bsnmpget.1.gz
OLD_FILES+=usr/share/man/man1/bsnmpset.1.gz
OLD_FILES+=usr/share/man/man1/bsnmpwalk.1.gz
OLD_FILES+=usr/share/man/man1/gensnmptree.1.gz
# lib/libbsnmp/libbsnmp
OLD_FILES+=usr/share/man/man3/TRUTH_GET.3.gz
OLD_FILES+=usr/share/man/man3/TRUTH_MK.3.gz
OLD_FILES+=usr/share/man/man3/TRUTH_OK.3.gz
OLD_FILES+=usr/share/man/man3/asn1.3.gz
OLD_FILES+=usr/share/man/man3/asn_append_oid.3.gz
OLD_FILES+=usr/share/man/man3/asn_commit_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_compare_oid.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_counter64_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_integer.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_integer_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_ipaddress.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_ipaddress_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_null.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_null_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_objid.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_objid_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_octetstring.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_octetstring_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_sequence.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_timeticks.3.gz
OLD_FILES+=usr/share/man/man3/asn_get_uint32_raw.3.gz
OLD_FILES+=usr/share/man/man3/asn_is_suboid.3.gz
OLD_FILES+=usr/share/man/man3/asn_oid2str.3.gz
OLD_FILES+=usr/share/man/man3/asn_oid2str_r.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_counter64.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_exception.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_integer.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_ipaddress.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_null.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_objid.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_octetstring.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_temp_header.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_timeticks.3.gz
OLD_FILES+=usr/share/man/man3/asn_put_uint32.3.gz
OLD_FILES+=usr/share/man/man3/asn_skip.3.gz
OLD_FILES+=usr/share/man/man3/asn_slice_oid.3.gz
OLD_FILES+=usr/share/man/man3/snmp_add_binding.3.gz
OLD_FILES+=usr/share/man/man3/snmp_calc_keychange.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client_init.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client_set_host.3.gz
OLD_FILES+=usr/share/man/man3/snmp_client_set_port.3.gz
OLD_FILES+=usr/share/man/man3/snmp_close.3.gz
OLD_FILES+=usr/share/man/man3/snmp_debug.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_commit.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_finish.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_lookup.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dep_rollback.3.gz
OLD_FILES+=usr/share/man/man3/snmp_depop_t.3.gz
OLD_FILES+=usr/share/man/man3/snmp_dialog.3.gz
OLD_FILES+=usr/share/man/man3/snmp_discover_engine.3.gz
OLD_FILES+=usr/share/man/man3/snmp_get.3.gz
OLD_FILES+=usr/share/man/man3/snmp_get_local_keys.3.gz
OLD_FILES+=usr/share/man/man3/snmp_getbulk.3.gz
OLD_FILES+=usr/share/man/man3/snmp_getnext.3.gz
OLD_FILES+=usr/share/man/man3/snmp_init_context.3.gz
OLD_FILES+=usr/share/man/man3/snmp_make_errresp.3.gz
OLD_FILES+=usr/share/man/man3/snmp_oid_append.3.gz
OLD_FILES+=usr/share/man/man3/snmp_op_t.3.gz
OLD_FILES+=usr/share/man/man3/snmp_open.3.gz
OLD_FILES+=usr/share/man/man3/snmp_parse_server.3.gz
OLD_FILES+=usr/share/man/man3/snmp_passwd_to_keys.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_check.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_create.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode_header.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode_scoped.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_decode_secmode.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_dump.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_encode.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_free.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_init_secparams.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_send.3.gz
OLD_FILES+=usr/share/man/man3/snmp_receive.3.gz
OLD_FILES+=usr/share/man/man3/snmp_send_cb_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_set.3.gz
OLD_FILES+=usr/share/man/man3/snmp_table_cb_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_table_fetch.3.gz
OLD_FILES+=usr/share/man/man3/snmp_table_fetch_async.3.gz
OLD_FILES+=usr/share/man/man3/snmp_timeout_cb_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_timeout_start_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_timeout_stop_f.3.gz
OLD_FILES+=usr/share/man/man3/snmp_trace.3.gz
OLD_FILES+=usr/share/man/man3/snmp_value_copy.3.gz
OLD_FILES+=usr/share/man/man3/snmp_value_free.3.gz
OLD_FILES+=usr/share/man/man3/snmp_value_parse.3.gz
OLD_FILES+=usr/share/man/man3/tree_size.3.gz
# usr.sbin/bsnmpd/bsnmpd
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_INT.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_INT_LINK.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_INT_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_OID.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_OID_LINK.3.gz
OLD_FILES+=usr/share/man/man3/FIND_OBJECT_OID_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_INT.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_INT_LINK.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_INT_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_OID.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_OID_LINK.3.gz
OLD_FILES+=usr/share/man/man3/INSERT_OBJECT_OID_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_INT.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_INT_LINK.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_INT_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_OID.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_OID_LINK.3.gz
OLD_FILES+=usr/share/man/man3/NEXT_OBJECT_OID_LINK_INDEX.3.gz
OLD_FILES+=usr/share/man/man3/asn1.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpagent.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpclient.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpd_get_target_stats.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpd_get_usm_stats.3.gz
OLD_FILES+=usr/share/man/man3/bsnmpd_reset_usm_stats.3.gz
OLD_FILES+=usr/share/man/man3/bsnmplib.3.gz
OLD_FILES+=usr/share/man/man3/buf_alloc.3.gz
OLD_FILES+=usr/share/man/man3/buf_size.3.gz
OLD_FILES+=usr/share/man/man3/comm_define.3.gz
OLD_FILES+=usr/share/man/man3/community.3.gz
OLD_FILES+=usr/share/man/man3/fd_deselect.3.gz
OLD_FILES+=usr/share/man/man3/fd_resume.3.gz
OLD_FILES+=usr/share/man/man3/fd_select.3.gz
OLD_FILES+=usr/share/man/man3/fd_suspend.3.gz
OLD_FILES+=usr/share/man/man3/get_ticks.3.gz
OLD_FILES+=usr/share/man/man3/index_append.3.gz
OLD_FILES+=usr/share/man/man3/index_append_off.3.gz
OLD_FILES+=usr/share/man/man3/index_compare.3.gz
OLD_FILES+=usr/share/man/man3/index_compare_off.3.gz
OLD_FILES+=usr/share/man/man3/index_decode.3.gz
OLD_FILES+=usr/share/man/man3/ip_commit.3.gz
OLD_FILES+=usr/share/man/man3/ip_get.3.gz
OLD_FILES+=usr/share/man/man3/ip_rollback.3.gz
OLD_FILES+=usr/share/man/man3/ip_save.3.gz
OLD_FILES+=usr/share/man/man3/or_register.3.gz
OLD_FILES+=usr/share/man/man3/or_unregister.3.gz
OLD_FILES+=usr/share/man/man3/oid_commit.3.gz
OLD_FILES+=usr/share/man/man3/oid_get.3.gz
OLD_FILES+=usr/share/man/man3/oid_rollback.3.gz
OLD_FILES+=usr/share/man/man3/oid_save.3.gz
OLD_FILES+=usr/share/man/man3/oid_usmNotInTimeWindows.3.gz
OLD_FILES+=usr/share/man/man3/oid_usmUnknownEngineIDs.3.gz
OLD_FILES+=usr/share/man/man3/oid_zeroDotZero.3.gz
OLD_FILES+=usr/share/man/man3/reqid_allocate.3.gz
OLD_FILES+=usr/share/man/man3/reqid_base.3.gz
OLD_FILES+=usr/share/man/man3/reqid_istype.3.gz
OLD_FILES+=usr/share/man/man3/reqid_next.3.gz
OLD_FILES+=usr/share/man/man3/reqid_type.3.gz
OLD_FILES+=usr/share/man/man3/snmp_bridge.3.gz
OLD_FILES+=usr/share/man/man3/snmp_hast.3.gz
OLD_FILES+=usr/share/man/man3/snmp_hostres.3.gz
OLD_FILES+=usr/share/man/man3/snmp_input_finish.3.gz
OLD_FILES+=usr/share/man/man3/snmp_input_start.3.gz
OLD_FILES+=usr/share/man/man3/snmp_lm75.3.gz
OLD_FILES+=usr/share/man/man3/snmp_mibII.3.gz
OLD_FILES+=usr/share/man/man3/snmp_netgraph.3.gz
OLD_FILES+=usr/share/man/man3/snmp_output.3.gz
OLD_FILES+=usr/share/man/man3/snmp_pdu_auth_access.3.gz
OLD_FILES+=usr/share/man/man3/snmp_send_port.3.gz
OLD_FILES+=usr/share/man/man3/snmp_send_trap.3.gz
OLD_FILES+=usr/share/man/man3/snmp_target.3.gz
OLD_FILES+=usr/share/man/man3/snmp_usm.3.gz
OLD_FILES+=usr/share/man/man3/snmp_vacm.3.gz
OLD_FILES+=usr/share/man/man3/snmp_wlan.3.gz
OLD_FILES+=usr/share/man/man3/snmpd_target_stat.3.gz
OLD_FILES+=usr/share/man/man3/snmpd_usmstats.3.gz
OLD_FILES+=usr/share/man/man3/snmpmod.3.gz
OLD_FILES+=usr/share/man/man3/start_tick.3.gz
OLD_FILES+=usr/share/man/man3/string_commit.3.gz
OLD_FILES+=usr/share/man/man3/string_free.3.gz
OLD_FILES+=usr/share/man/man3/string_get.3.gz
OLD_FILES+=usr/share/man/man3/string_get_max.3.gz
OLD_FILES+=usr/share/man/man3/string_rollback.3.gz
OLD_FILES+=usr/share/man/man3/string_save.3.gz
OLD_FILES+=usr/share/man/man3/systemg.3.gz
OLD_FILES+=usr/share/man/man3/this_tick.3.gz
OLD_FILES+=usr/share/man/man3/timer_start.3.gz
OLD_FILES+=usr/share/man/man3/timer_start_repeat.3.gz
OLD_FILES+=usr/share/man/man3/timer_stop.3.gz
OLD_FILES+=usr/share/man/man3/target_activate_address.3.gz
OLD_FILES+=usr/share/man/man3/target_address.3.gz
OLD_FILES+=usr/share/man/man3/target_delete_address.3.gz
OLD_FILES+=usr/share/man/man3/target_delete_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_delete_param.3.gz
OLD_FILES+=usr/share/man/man3/target_first_address.3.gz
OLD_FILES+=usr/share/man/man3/target_first_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_first_param.3.gz
OLD_FILES+=usr/share/man/man3/target_flush_all.3.gz
OLD_FILES+=usr/share/man/man3/target_next_address.3.gz
OLD_FILES+=usr/share/man/man3/target_next_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_next_param.3.gz
OLD_FILES+=usr/share/man/man3/target_new_address.3.gz
OLD_FILES+=usr/share/man/man3/target_new_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_new_param.3.gz
OLD_FILES+=usr/share/man/man3/target_notify.3.gz
OLD_FILES+=usr/share/man/man3/target_param.3.gz
OLD_FILES+=usr/share/man/man3/usm_delete_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_find_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_first_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_flush_users.3.gz
OLD_FILES+=usr/share/man/man3/usm_next_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_new_user.3.gz
OLD_FILES+=usr/share/man/man3/usm_user.3.gz
OLD_FILES+=usr/share/snmp/defs/bridge_tree.def
OLD_FILES+=usr/share/snmp/defs/hast_tree.def
OLD_FILES+=usr/share/snmp/defs/hostres_tree.def
OLD_FILES+=usr/share/snmp/defs/lm75_tree.def
OLD_FILES+=usr/share/snmp/defs/mibII_tree.def
OLD_FILES+=usr/share/snmp/defs/netgraph_tree.def
OLD_FILES+=usr/share/snmp/defs/pf_tree.def
OLD_FILES+=usr/share/snmp/defs/target_tree.def
OLD_FILES+=usr/share/snmp/defs/tree.def
OLD_FILES+=usr/share/snmp/defs/usm_tree.def
OLD_FILES+=usr/share/snmp/defs/vacm_tree.def
OLD_FILES+=usr/share/snmp/defs/wlan_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM-FREEBSD-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-ATM.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-BRIDGE-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-HAST-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-HOSTRES-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-IP-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-LM75-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-MIB2-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-NETGRAPH.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-PF-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-SNMPD.txt
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-WIRELESS-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/BRIDGE-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/FOKUS-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/FREEBSD-MIB.txt
OLD_FILES+=usr/share/snmp/mibs/RSTP-MIB.txt
OLD_DIRS+=usr/include/bsnmp
OLD_DIRS+=usr/share/snmp/defs
OLD_DIRS+=usr/share/snmp/mibs
OLD_DIRS+=usr/share/snmp
.endif
.if ${MK_CALENDAR} == no
OLD_FILES+=etc/periodic/daily/300.calendar
OLD_FILES+=usr/bin/calendar
OLD_FILES+=usr/share/calendar/calendar.all
OLD_FILES+=usr/share/calendar/calendar.australia
OLD_FILES+=usr/share/calendar/calendar.birthday
OLD_FILES+=usr/share/calendar/calendar.brazilian
OLD_FILES+=usr/share/calendar/calendar.christian
OLD_FILES+=usr/share/calendar/calendar.computer
OLD_FILES+=usr/share/calendar/calendar.croatian
OLD_FILES+=usr/share/calendar/calendar.dutch
OLD_FILES+=usr/share/calendar/calendar.freebsd
OLD_FILES+=usr/share/calendar/calendar.french
OLD_FILES+=usr/share/calendar/calendar.german
OLD_FILES+=usr/share/calendar/calendar.history
OLD_FILES+=usr/share/calendar/calendar.holiday
OLD_FILES+=usr/share/calendar/calendar.hungarian
OLD_FILES+=usr/share/calendar/calendar.judaic
OLD_FILES+=usr/share/calendar/calendar.lotr
OLD_FILES+=usr/share/calendar/calendar.music
OLD_FILES+=usr/share/calendar/calendar.newzealand
OLD_FILES+=usr/share/calendar/calendar.russian
OLD_FILES+=usr/share/calendar/calendar.southafrica
OLD_FILES+=usr/share/calendar/calendar.ukrainian
OLD_FILES+=usr/share/calendar/calendar.usholiday
OLD_FILES+=usr/share/calendar/calendar.world
OLD_FILES+=usr/share/calendar/de_AT.ISO_8859-15/calendar.feiertag
OLD_DIRS+=usr/share/calendar/de_AT.ISO_8859-15
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.feiertag
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.geschichte
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.kirche
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.literatur
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.musik
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-1/calendar.wissenschaft
OLD_DIRS+=usr/share/calendar/de_DE.ISO8859-1
OLD_FILES+=usr/share/calendar/de_DE.ISO8859-15
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.fetes
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.french
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.jferies
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-1/calendar.proverbes
OLD_DIRS+=usr/share/calendar/fr_FR.ISO8859-1
OLD_FILES+=usr/share/calendar/fr_FR.ISO8859-15
OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.all
OLD_FILES+=usr/share/calendar/hr_HR.ISO8859-2/calendar.praznici
OLD_DIRS+=usr/share/calendar/hr_HR.ISO8859-2
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.all
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.nevnapok
OLD_FILES+=usr/share/calendar/hu_HU.ISO8859-2/calendar.unnepek
OLD_DIRS+=usr/share/calendar/hu_HU.ISO8859-2
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.all
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.commemorative
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.holidays
OLD_FILES+=usr/share/calendar/pt_BR.ISO8859-1/calendar.mcommemorative
OLD_DIRS+=usr/share/calendar/pt_BR.ISO8859-1
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.all
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.commemorative
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.holidays
OLD_FILES+=usr/share/calendar/pt_BR.UTF-8/calendar.mcommemorative
OLD_DIRS+=usr/share/calendar/pt_BR.UTF-8
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.all
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.common
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.holiday
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.military
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.orthodox
OLD_FILES+=usr/share/calendar/ru_RU.KOI8-R/calendar.pagan
OLD_DIRS+=usr/share/calendar/ru_RU.KOI8-R
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.all
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.common
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.holiday
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.military
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.orthodox
OLD_FILES+=usr/share/calendar/ru_RU.UTF-8/calendar.pagan
OLD_DIRS+=usr/share/calendar/ru_RU.UTF-8
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.all
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.holiday
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.misc
OLD_FILES+=usr/share/calendar/uk_UA.KOI8-U/calendar.orthodox
OLD_DIRS+=usr/share/calendar/uk_UA.KOI8-U
OLD_DIRS+=usr/share/calendar
OLD_FILES+=usr/share/man/man1/calendar.1.gz
OLD_FILES+=usr/tests/usr.bin/calendar/Kyuafile
OLD_FILES+=usr/tests/usr.bin/calendar/calendar.calibrate
OLD_FILES+=usr/tests/usr.bin/calendar/legacy_test
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.a5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.b5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.s4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.sh
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-6.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.w0-7.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-1.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-2.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-3.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-4.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-5.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-6.out
OLD_FILES+=usr/tests/usr.bin/calendar/regress.wn-7.out
OLD_DIRS+=usr/tests/usr.bin/calendar
.endif
.if ${MK_CASPER} == no
OLD_FILES+=etc/casper/system.dns
OLD_FILES+=etc/casper/system.grp
OLD_FILES+=etc/casper/system.pwd
OLD_FILES+=etc/casper/system.random
OLD_FILES+=etc/casper/system.sysctl
OLD_FILES+=etc/rc.d/casperd
OLD_LIBS+=lib/libcapsicum.so.0
OLD_LIBS+=lib/libcasper.so.0
OLD_FILES+=libexec/casper/dns
OLD_FILES+=libexec/casper/grp
OLD_FILES+=libexec/casper/pwd
OLD_FILES+=libexec/casper/random
OLD_FILES+=libexec/casper/sysctl
OLD_FILES+=sbin/casper
OLD_FILES+=sbin/casperd
OLD_FILES+=usr/include/libcapsicum.h
OLD_FILES+=usr/include/libcapsicum_dns.h
OLD_FILES+=usr/include/libcapsicum_grp.h
OLD_FILES+=usr/include/libcapsicum_pwd.h
OLD_FILES+=usr/include/libcapsicum_random.h
OLD_FILES+=usr/include/libcapsicum_service.h
OLD_FILES+=usr/include/libcapsicum_sysctl.h
OLD_FILES+=usr/include/libcasper.h
OLD_FILES+=usr/lib/libcapsicum.a
OLD_FILES+=usr/lib/libcapsicum.so
OLD_FILES+=usr/lib/libcapsicum_p.a
OLD_FILES+=usr/lib/libcasper.a
OLD_FILES+=usr/lib/libcasper.so
OLD_FILES+=usr/lib/libcasper_p.a
OLD_FILES+=usr/lib32/libcapsicum.a
OLD_FILES+=usr/lib32/libcapsicum.so
OLD_LIBS+=usr/lib32/libcapsicum.so.0
OLD_FILES+=usr/lib32/libcapsicum_p.a
OLD_FILES+=usr/lib32/libcasper.a
OLD_FILES+=usr/lib32/libcasper.so
OLD_LIBS+=usr/lib32/libcasper.so.0
OLD_FILES+=usr/lib32/libcasper_p.a
OLD_FILES+=usr/share/man/man3/cap_clone.3.gz
OLD_FILES+=usr/share/man/man3/cap_close.3.gz
OLD_FILES+=usr/share/man/man3/cap_init.3.gz
OLD_FILES+=usr/share/man/man3/cap_limit_get.3.gz
OLD_FILES+=usr/share/man/man3/cap_limit_set.3.gz
OLD_FILES+=usr/share/man/man3/cap_recv_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/cap_send_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/cap_service_open.3.gz
OLD_FILES+=usr/share/man/man3/cap_sock.3.gz
OLD_FILES+=usr/share/man/man3/cap_unwrap.3.gz
OLD_FILES+=usr/share/man/man3/cap_wrap.3.gz
OLD_FILES+=usr/share/man/man3/cap_xfer_nvlist.3.gz
OLD_FILES+=usr/share/man/man3/libcapsicum.3.gz
OLD_FILES+=usr/share/man/man8/casperd.8.gz
.endif
.if ${MK_CCD} == no
OLD_FILES+=etc/rc.d/ccd
OLD_FILES+=rescue/ccdconfig
OLD_FILES+=sbin/ccdconfig
OLD_FILES+=usr/share/man/man4/ccd.4.gz
OLD_FILES+=usr/share/man/man8/ccdconfig.8.gz
.endif
.if ${MK_CDDL} == no
OLD_LIBS+=lib/libavl.so.2
OLD_LIBS+=lib/libctf.so.2
OLD_LIBS+=lib/libdtrace.so.2
OLD_LIBS+=lib/libnvpair.so.2
OLD_LIBS+=lib/libumem.so.2
OLD_LIBS+=lib/libuutil.so.2
OLD_FILES+=usr/bin/ctfconvert
OLD_FILES+=usr/bin/ctfdump
OLD_FILES+=usr/bin/ctfmerge
OLD_FILES+=usr/lib/dtrace/drti.o
OLD_FILES+=usr/lib/dtrace/errno.d
OLD_FILES+=usr/lib/dtrace/io.d
OLD_FILES+=usr/lib/dtrace/ip.d
OLD_FILES+=usr/lib/dtrace/psinfo.d
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/lib/dtrace/regs_x86.d
.endif
OLD_FILES+=usr/lib/dtrace/signal.d
OLD_FILES+=usr/lib/dtrace/tcp.d
OLD_FILES+=usr/lib/dtrace/udp.d
OLD_FILES+=usr/lib/dtrace/unistd.d
OLD_FILES+=usr/lib/libavl.a
OLD_FILES+=usr/lib/libavl.so
OLD_FILES+=usr/lib/libavl_p.a
OLD_FILES+=usr/lib/libctf.a
OLD_FILES+=usr/lib/libctf.so
OLD_FILES+=usr/lib/libctf_p.a
OLD_FILES+=usr/lib/libdtrace.a
OLD_FILES+=usr/lib/libdtrace.so
OLD_FILES+=usr/lib/libdtrace_p.a
OLD_FILES+=usr/lib/libnvpair.a
OLD_FILES+=usr/lib/libnvpair.so
OLD_FILES+=usr/lib/libnvpair_p.a
OLD_FILES+=usr/lib/libumem.a
OLD_FILES+=usr/lib/libumem.so
OLD_FILES+=usr/lib/libumem_p.a
OLD_FILES+=usr/lib/libuutil.a
OLD_FILES+=usr/lib/libuutil.so
OLD_FILES+=usr/lib/libuutil_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/dtrace/drti.o
OLD_FILES+=usr/lib32/libavl.a
OLD_FILES+=usr/lib32/libavl.so
OLD_LIBS+=usr/lib32/libavl.so.2
OLD_FILES+=usr/lib32/libavl_p.a
OLD_FILES+=usr/lib32/libctf.a
OLD_FILES+=usr/lib32/libctf.so
OLD_LIBS+=usr/lib32/libctf.so.2
OLD_FILES+=usr/lib32/libctf_p.a
OLD_FILES+=usr/lib32/libdtrace.a
OLD_FILES+=usr/lib32/libdtrace.so
OLD_LIBS+=usr/lib32/libdtrace.so.2
OLD_FILES+=usr/lib32/libdtrace_p.a
OLD_FILES+=usr/lib32/libnvpair.a
OLD_FILES+=usr/lib32/libnvpair.so
OLD_LIBS+=usr/lib32/libnvpair.so.2
OLD_FILES+=usr/lib32/libnvpair_p.a
OLD_FILES+=usr/lib32/libumem.a
OLD_FILES+=usr/lib32/libumem.so
OLD_LIBS+=usr/lib32/libumem.so.2
OLD_FILES+=usr/lib32/libumem_p.a
OLD_FILES+=usr/lib32/libuutil.a
OLD_FILES+=usr/lib32/libuutil.so
OLD_LIBS+=usr/lib32/libuutil.so.2
OLD_FILES+=usr/lib32/libuutil_p.a
.endif
OLD_FILES+=usr/sbin/dtrace
OLD_FILES+=usr/sbin/lockstat
OLD_FILES+=usr/sbin/plockstat
OLD_FILES+=usr/share/man/man1/dtrace.1.gz
OLD_FILES+=usr/share/man/man1/dtruss.1.gz
OLD_FILES+=usr/share/man/man1/lockstat.1.gz
OLD_FILES+=usr/share/man/man1/plockstat.1.gz
OLD_FILES+=usr/share/dtrace/disklatency
OLD_FILES+=usr/share/dtrace/disklatencycmd
OLD_FILES+=usr/share/dtrace/hotopen
OLD_FILES+=usr/share/dtrace/nfsclienttime
OLD_FILES+=usr/share/dtrace/toolkit/execsnoop
OLD_FILES+=usr/share/dtrace/toolkit/hotkernel
OLD_FILES+=usr/share/dtrace/toolkit/hotuser
OLD_FILES+=usr/share/dtrace/toolkit/opensnoop
OLD_FILES+=usr/share/dtrace/toolkit/procsystime
OLD_FILES+=usr/share/dtrace/tcpconn
OLD_FILES+=usr/share/dtrace/tcpstate
OLD_FILES+=usr/share/dtrace/tcptrack
OLD_FILES+=usr/share/dtrace/udptrack
OLD_DIRS+=usr/lib/dtrace
OLD_DIRS+=usr/lib32/dtrace
OLD_DIRS+=usr/share/dtrace/toolkit
OLD_DIRS+=usr/share/dtrace
.endif
.if ${MK_ZFS} == no
OLD_FILES+=boot/gptzfsboot
OLD_FILES+=boot/zfsboot
OLD_FILES+=boot/zfsloader
OLD_FILES+=etc/rc.d/zfs
OLD_FILES+=etc/rc.d/zfsd
OLD_FILES+=etc/rc.d/zfsbe
OLD_FILES+=etc/rc.d/zvol
OLD_FILES+=etc/devd/zfs.conf
OLD_FILES+=etc/periodic/daily/404.status-zfs
OLD_FILES+=etc/periodic/daily/800.scrub-zfs
OLD_LIBS+=lib/libzfs.so.2
OLD_LIBS+=lib/libzfs.so.3
OLD_LIBS+=lib/libzfs_core.so.2
OLD_LIBS+=lib/libzpool.so.2
OLD_FILES+=rescue/zdb
OLD_FILES+=rescue/zfs
OLD_FILES+=rescue/zpool
OLD_FILES+=sbin/bectl
OLD_FILES+=sbin/zfs
OLD_FILES+=sbin/zpool
OLD_FILES+=sbin/zfsbootcfg
OLD_FILES+=usr/bin/zinject
OLD_FILES+=usr/bin/zstreamdump
OLD_FILES+=usr/bin/ztest
OLD_FILES+=usr/lib/libbe.a
OLD_FILES+=usr/lib/libbe_p.a
OLD_FILES+=usr/lib/libbe.so
OLD_LIBS+=lib/libbe.so.1
OLD_FILES+=usr/lib/libzfs.a
OLD_FILES+=usr/lib/libzfs.so
OLD_FILES+=usr/lib/libzfs_core.a
OLD_FILES+=usr/lib/libzfs_core.so
OLD_FILES+=usr/lib/libzfs_core_p.a
OLD_FILES+=usr/lib/libzfs_p.a
OLD_FILES+=usr/lib/libzpool.a
OLD_FILES+=usr/lib/libzpool.so
OLD_LIBS+=usr/lib/libzpool.so.2
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libzfs.a
OLD_FILES+=usr/lib32/libzfs.so
OLD_LIBS+=usr/lib32/libzfs.so.2
OLD_LIBS+=usr/lib32/libzfs.so.3
OLD_FILES+=usr/lib32/libzfs_core.a
OLD_FILES+=usr/lib32/libzfs_core.so
OLD_LIBS+=usr/lib32/libzfs_core.so.2
OLD_FILES+=usr/lib32/libzfs_core_p.a
OLD_FILES+=usr/lib32/libzfs_p.a
OLD_FILES+=usr/lib32/libzpool.a
OLD_FILES+=usr/lib32/libzpool.so
OLD_LIBS+=usr/lib32/libzpool.so.2
.endif
OLD_FILES+=usr/sbin/zfsd
OLD_FILES+=usr/sbin/zhack
OLD_FILES+=usr/sbin/zdb
OLD_FILES+=usr/share/man/man3/libbe.3.gz
OLD_FILES+=usr/share/man/man7/zpool-features.7.gz
OLD_FILES+=usr/share/man/man8/bectl.8.gz
OLD_FILES+=usr/share/man/man8/gptzfsboot.8.gz
OLD_FILES+=usr/share/man/man8/zdb.8.gz
OLD_FILES+=usr/share/man/man8/zfs-program.8.gz
OLD_FILES+=usr/share/man/man8/zfs.8.gz
OLD_FILES+=usr/share/man/man8/zfsboot.8.gz
OLD_FILES+=usr/share/man/man8/zfsbootcfg.8.gz
OLD_FILES+=usr/share/man/man8/zfsd.8.gz
OLD_FILES+=usr/share/man/man8/zfsloader.8.gz
OLD_FILES+=usr/share/man/man8/zpool.8.gz
.endif
.if ${MK_CLANG} == no
OLD_FILES+=usr/bin/clang
OLD_FILES+=usr/bin/clang++
OLD_FILES+=usr/bin/clang-cpp
OLD_FILES+=usr/bin/clang-tblgen
OLD_FILES+=usr/bin/llvm-objdump
OLD_FILES+=usr/bin/llvm-tblgen
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/allocator_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/asan_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/common_interface_defs.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/coverage_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/dfsan_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/esan_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/hwasan_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/linux_syscall_hooks.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/lsan_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/msan_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/netbsd_syscall_hooks.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/scudo_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/tsan_interface.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sanitizer/tsan_interface_atomic.h
-OLD_DIRS+=usr/lib/clang/7.0.0/include/sanitizer
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_builtin_vars.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_cmath.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_complex_builtins.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_device_functions.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_intrinsics.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_libdevice_declares.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_math_forward_declares.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__clang_cuda_runtime_wrapper.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__stddef_max_align_t.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__wmmintrin_aes.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/__wmmintrin_pclmul.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/adxintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/altivec.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/ammintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/arm64intr.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/arm_acle.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/arm_fp16.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/arm_neon.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/armintr.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx2intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512bitalgintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512bwintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512cdintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512dqintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512erintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512fintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512ifmaintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512ifmavlintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512pfintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vbmi2intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vbmiintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vbmivlintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vlbitalgintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vlbwintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vlcdintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vldqintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vlintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vlvbmi2intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vlvnniintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vnniintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vpopcntdqintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avx512vpopcntdqvlintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/avxintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/bmi2intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/bmiintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/cetintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/cldemoteintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/clflushoptintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/clwbintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/clzerointrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/cpuid.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/emmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/f16cintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/fma4intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/fmaintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/fxsrintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/gfniintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/htmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/htmxlintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/ia32intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/immintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/invpcidintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/lwpintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/lzcntintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/mm3dnow.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/mm_malloc.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/mmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/module.modulemap
-OLD_FILES+=usr/lib/clang/7.0.0/include/movdirintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/msa.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/mwaitxintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/nmmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/opencl-c.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/pconfigintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/pkuintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/pmmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/popcntintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/prfchwintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/ptwriteintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/rdseedintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/rtmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/s390intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/sgxintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/shaintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/smmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/tbmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/tmmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/vadefs.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/vaesintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/vecintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/vpclmulqdqintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/waitpkgintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/wbnoinvdintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/wmmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/x86intrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/xmmintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/xopintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/xsavecintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/xsaveintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/xsaveoptintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/xsavesintrin.h
-OLD_FILES+=usr/lib/clang/7.0.0/include/xtestintrin.h
-OLD_DIRS+=usr/lib/clang/7.0.0/include
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan-i386.so
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan-preinit-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan-x86_64.so
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.msan-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.msan-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.msan_cxx-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.profile-arm.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.profile-armhf.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.profile-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.profile-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.safestack-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.safestack-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.stats-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.stats-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.stats_client-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.stats_client-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.tsan-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
-OLD_FILES+=usr/lib/clang/7.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
-OLD_DIRS+=usr/lib/clang/7.0.0/lib/freebsd
-OLD_DIRS+=usr/lib/clang/7.0.0/lib
-OLD_DIRS+=usr/lib/clang/7.0.0
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/allocator_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/asan_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/common_interface_defs.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/coverage_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/dfsan_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/esan_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/hwasan_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/linux_syscall_hooks.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/lsan_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/msan_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/netbsd_syscall_hooks.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/scudo_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/tsan_interface.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sanitizer/tsan_interface_atomic.h
+OLD_DIRS+=usr/lib/clang/7.0.1/include/sanitizer
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_builtin_vars.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_cmath.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_complex_builtins.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_device_functions.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_intrinsics.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_libdevice_declares.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_math_forward_declares.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__clang_cuda_runtime_wrapper.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__stddef_max_align_t.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__wmmintrin_aes.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/__wmmintrin_pclmul.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/adxintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/altivec.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/ammintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/arm64intr.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/arm_acle.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/arm_fp16.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/arm_neon.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/armintr.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx2intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512bitalgintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512bwintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512cdintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512dqintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512erintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512fintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512ifmaintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512ifmavlintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512pfintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmi2intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmiintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vbmivlintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlbitalgintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlbwintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlcdintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vldqintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlvbmi2intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vlvnniintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vnniintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vpopcntdqintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avx512vpopcntdqvlintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/avxintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/bmi2intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/bmiintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/cetintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/cldemoteintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/clflushoptintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/clwbintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/clzerointrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/cpuid.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/emmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/f16cintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/fma4intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/fmaintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/fxsrintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/gfniintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/htmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/htmxlintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/ia32intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/immintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/invpcidintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/lwpintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/lzcntintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/mm3dnow.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/mm_malloc.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/mmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/module.modulemap
+OLD_FILES+=usr/lib/clang/7.0.1/include/movdirintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/msa.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/mwaitxintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/nmmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/opencl-c.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/pconfigintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/pkuintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/pmmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/popcntintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/prfchwintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/ptwriteintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/rdseedintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/rtmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/s390intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/sgxintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/shaintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/smmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/tbmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/tmmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/vadefs.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/vaesintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/vecintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/vpclmulqdqintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/waitpkgintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/wbnoinvdintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/wmmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/x86intrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/xmmintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/xopintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/xsavecintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/xsaveintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/xsaveoptintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/xsavesintrin.h
+OLD_FILES+=usr/lib/clang/7.0.1/include/xtestintrin.h
+OLD_DIRS+=usr/lib/clang/7.0.1/include
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-i386.so
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-preinit-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-preinit-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan-x86_64.so
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan_cxx-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.msan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-arm.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-armhf.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.profile-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.safestack-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.safestack-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats_client-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.stats_client-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.tsan-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.tsan_cxx-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_minimal-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_minimal-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone-x86_64.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-i386.a
+OLD_FILES+=usr/lib/clang/7.0.1/lib/freebsd/libclang_rt.ubsan_standalone_cxx-x86_64.a
+OLD_DIRS+=usr/lib/clang/7.0.1/lib/freebsd
+OLD_DIRS+=usr/lib/clang/7.0.1/lib
+OLD_DIRS+=usr/lib/clang/7.0.1
OLD_DIRS+=usr/lib/clang
OLD_FILES+=usr/share/doc/llvm/clang/LICENSE.TXT
OLD_DIRS+=usr/share/doc/llvm/clang
OLD_FILES+=usr/share/doc/llvm/COPYRIGHT.regex
OLD_FILES+=usr/share/doc/llvm/LICENSE.TXT
OLD_DIRS+=usr/share/doc/llvm
OLD_FILES+=usr/share/man/man1/clang.1.gz
OLD_FILES+=usr/share/man/man1/clang++.1.gz
OLD_FILES+=usr/share/man/man1/clang-cpp.1.gz
OLD_FILES+=usr/share/man/man1/llvm-tblgen.1.gz
.endif
.if ${MK_CLANG_EXTRAS} == no
OLD_FILES+=usr/bin/bugpoint
OLD_FILES+=usr/bin/clang-format
OLD_FILES+=usr/bin/llc
OLD_FILES+=usr/bin/lli
OLD_FILES+=usr/bin/llvm-ar
OLD_FILES+=usr/bin/llvm-as
OLD_FILES+=usr/bin/llvm-bcanalyzer
OLD_FILES+=usr/bin/llvm-cxxdump
OLD_FILES+=usr/bin/llvm-cxxfilt
OLD_FILES+=usr/bin/llvm-diff
OLD_FILES+=usr/bin/llvm-dis
OLD_FILES+=usr/bin/llvm-dwarfdump
OLD_FILES+=usr/bin/llvm-extract
OLD_FILES+=usr/bin/llvm-link
OLD_FILES+=usr/bin/llvm-lto
OLD_FILES+=usr/bin/llvm-lto2
OLD_FILES+=usr/bin/llvm-mc
OLD_FILES+=usr/bin/llvm-mca
OLD_FILES+=usr/bin/llvm-modextract
OLD_FILES+=usr/bin/llvm-nm
OLD_FILES+=usr/bin/llvm-objcopy
OLD_FILES+=usr/bin/llvm-pdbutil
OLD_FILES+=usr/bin/llvm-ranlib
OLD_FILES+=usr/bin/llvm-rtdyld
OLD_FILES+=usr/bin/llvm-symbolizer
OLD_FILES+=usr/bin/llvm-xray
OLD_FILES+=usr/bin/opt
OLD_FILES+=usr/share/man/man1/bugpoint.1.gz
OLD_FILES+=usr/share/man/man1/llc.1.gz
OLD_FILES+=usr/share/man/man1/lli.1.gz
OLD_FILES+=usr/share/man/man1/llvm-ar.1.gz
OLD_FILES+=usr/share/man/man1/llvm-as.1.gz
OLD_FILES+=usr/share/man/man1/llvm-bcanalyzer.1.gz
OLD_FILES+=usr/share/man/man1/llvm-diff.1.gz
OLD_FILES+=usr/share/man/man1/llvm-dis.1.gz
OLD_FILES+=usr/share/man/man1/llvm-dwarfdump.1.gz
OLD_FILES+=usr/share/man/man1/llvm-extract.1.gz
OLD_FILES+=usr/share/man/man1/llvm-link.1.gz
OLD_FILES+=usr/share/man/man1/llvm-nm.1.gz
OLD_FILES+=usr/share/man/man1/llvm-pdbutil.1.gz
OLD_FILES+=usr/share/man/man1/llvm-symbolizer.1.gz
OLD_FILES+=usr/share/man/man1/opt.1.gz
.endif
.if ${MK_CPP} == no
OLD_FILES+=usr/bin/cpp
OLD_FILES+=usr/share/man/man1/cpp.1.gz
.endif
.if ${MK_CTM} == no
OLD_FILES+=usr/sbin/ctm
OLD_FILES+=usr/sbin/ctm_dequeue
OLD_FILES+=usr/sbin/ctm_rmail
OLD_FILES+=usr/sbin/ctm_smail
OLD_FILES+=usr/share/man/man1/ctm.1.gz
OLD_FILES+=usr/share/man/man1/ctm_dequeue.1.gz
OLD_FILES+=usr/share/man/man1/ctm_rmail.1.gz
OLD_FILES+=usr/share/man/man1/ctm_smail.1.gz
OLD_FILES+=usr/share/man/man5/ctm.5.gz
.endif
.if ${MK_CUSE} == no
OLD_FILES+=usr/include/fs/cuse/cuse_defs.h
OLD_FILES+=usr/include/fs/cuse/cuse_ioctl.h
OLD_FILES+=usr/include/cuse.h
OLD_FILES+=usr/lib/libcuse.a
OLD_LIBS+=usr/lib/libcuse.so.1
OLD_FILES+=usr/lib/libcuse_p.a
OLD_FILES+=usr/share/man/man3/cuse.3.gz
OLD_FILES+=usr/share/man/man3/cuse_alloc_unit_number.3.gz
OLD_FILES+=usr/share/man/man3/cuse_alloc_unit_number_by_id.3.gz
OLD_FILES+=usr/share/man/man3/cuse_copy_in.3.gz
OLD_FILES+=usr/share/man/man3/cuse_copy_out.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_create.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_destroy.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_current.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_per_file_handle.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_priv0.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_get_priv1.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_set_per_file_handle.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_set_priv0.3.gz
OLD_FILES+=usr/share/man/man3/cuse_dev_set_priv1.3.gz
OLD_FILES+=usr/share/man/man3/cuse_free_unit_number.3.gz
OLD_FILES+=usr/share/man/man3/cuse_free_unit_number_by_id.3.gz
OLD_FILES+=usr/share/man/man3/cuse_get_local.3.gz
OLD_FILES+=usr/share/man/man3/cuse_got_peer_signal.3.gz
OLD_FILES+=usr/share/man/man3/cuse_init.3.gz
OLD_FILES+=usr/share/man/man3/cuse_is_vmalloc_addr.3.gz
OLD_FILES+=usr/share/man/man3/cuse_poll_wakeup.3.gz
OLD_FILES+=usr/share/man/man3/cuse_set_local.3.gz
OLD_FILES+=usr/share/man/man3/cuse_uninit.3.gz
OLD_FILES+=usr/share/man/man3/cuse_vmalloc.3.gz
OLD_FILES+=usr/share/man/man3/cuse_vmfree.3.gz
OLD_FILES+=usr/share/man/man3/cuse_vmoffset.3.gz
OLD_FILES+=usr/share/man/man3/cuse_wait_and_process.3.gz
OLD_DIRS+=usr/include/fs/cuse
.endif
# devd(8) not listed here on purpose
.if ${MK_CXX} == no
OLD_FILES+=usr/bin/CC
OLD_FILES+=usr/bin/c++
OLD_FILES+=usr/bin/g++
OLD_FILES+=usr/libexec/cc1plus
.endif
.if ${MK_DEBUG_FILES} == no
.if exists(${DESTDIR}/usr/lib/debug)
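# Build the removal lists from whatever is installed under /usr/lib/debug
# (boot/ entries are skipped): lib*.so* debug files are treated as
# OLD_LIBS, all other files as OLD_FILES and the directories as OLD_DIRS.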
DEBUG_DIRS!=find ${DESTDIR}/usr/lib/debug -mindepth 1 \
-type d \! -path "${DESTDIR}/usr/lib/debug/boot/*" \
| sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_FILES!=find ${DESTDIR}/usr/lib/debug \
\! -type d \! -path "${DESTDIR}/usr/lib/debug/boot/*" \! -name "lib*.so*" \
| sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_LIBS!=find ${DESTDIR}/usr/lib/debug \! -type d -name "lib*.so*" \
| sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${DEBUG_DIRS}
OLD_FILES+=${DEBUG_FILES}
OLD_LIBS+=${DEBUG_LIBS}
.endif
.endif
.if ${MK_DIALOG} == no
OLD_FILES+=usr/bin/dialog
OLD_FILES+=usr/bin/dpv
OLD_FILES+=usr/lib/libdialog.a
OLD_FILES+=usr/lib/libdialog.so
OLD_LIBS+=usr/lib/libdialog.so.8
OLD_FILES+=usr/lib/libdialog_p.a
OLD_FILES+=usr/lib/libdpv.a
OLD_FILES+=usr/lib/libdpv.so
OLD_LIBS+=usr/lib/libdpv.so.1
OLD_FILES+=usr/lib/libdpv_p.a
OLD_FILES+=usr/sbin/bsdconfig
OLD_FILES+=usr/share/man/man1/dialog.1.gz
OLD_FILES+=usr/share/man/man1/dpv.1.gz
OLD_FILES+=usr/share/man/man3/dialog.3.gz
OLD_FILES+=usr/share/man/man3/dpv.3.gz
OLD_FILES+=usr/share/man/man8/bsdconfig.8.gz
OLD_DIRS+=usr/share/bsdconfig/media
OLD_DIRS+=usr/share/bsdconfig/networking
OLD_DIRS+=usr/share/bsdconfig/packages
OLD_DIRS+=usr/share/bsdconfig/password
OLD_DIRS+=usr/share/bsdconfig/startup
OLD_DIRS+=usr/share/bsdconfig/timezone
OLD_DIRS+=usr/share/bsdconfig/usermgmt
OLD_DIRS+=usr/share/bsdconfig
.endif
.if ${MK_EFI} == no
OLD_FILES+=usr/sbin/efibootmgr
OLD_FILES+=usr/sbin/efidp
OLD_FILES+=usr/sbin/efivar
OLD_FILES+=usr/sbin/uefisign
OLD_FILES+=usr/share/examples/uefisign/uefikeys
.endif
.if ${MK_FMTREE} == no
OLD_FILES+=usr/sbin/fmtree
OLD_FILES+=usr/share/man/man8/fmtree.8.gz
.endif
.if ${MK_FTP} == no
OLD_FILES+=etc/ftpusers
OLD_FILES+=etc/newsyslog.conf.d/ftp.conf
OLD_FILES+=etc/pam.d/ftp
OLD_FILES+=etc/pam.d/ftpd
OLD_FILES+=etc/rc.d/ftpd
OLD_FILES+=etc/syslog.d/ftp.conf
OLD_FILES+=usr/bin/ftp
OLD_FILES+=usr/bin/gate-ftp
OLD_FILES+=usr/bin/pftp
OLD_FILES+=usr/libexec/ftpd
OLD_FILES+=usr/share/man/man1/ftp.1.gz
OLD_FILES+=usr/share/man/man1/gate-ftp.1.gz
OLD_FILES+=usr/share/man/man1/pftp.1.gz
OLD_FILES+=usr/share/man/man5/ftpchroot.5.gz
OLD_FILES+=usr/share/man/man8/ftpd.8.gz
.endif
.if ${MK_GNUCXX} == no
OLD_FILES+=usr/bin/g++
OLD_FILES+=usr/include/c++/4.2/algorithm
OLD_FILES+=usr/include/c++/4.2/backward/algo.h
OLD_FILES+=usr/include/c++/4.2/backward/algobase.h
OLD_FILES+=usr/include/c++/4.2/backward/alloc.h
OLD_FILES+=usr/include/c++/4.2/backward/backward_warning.h
OLD_FILES+=usr/include/c++/4.2/backward/bvector.h
OLD_FILES+=usr/include/c++/4.2/backward/complex.h
OLD_FILES+=usr/include/c++/4.2/backward/defalloc.h
OLD_FILES+=usr/include/c++/4.2/backward/deque.h
OLD_FILES+=usr/include/c++/4.2/backward/fstream.h
OLD_FILES+=usr/include/c++/4.2/backward/function.h
OLD_FILES+=usr/include/c++/4.2/backward/hash_map.h
OLD_FILES+=usr/include/c++/4.2/backward/hash_set.h
OLD_FILES+=usr/include/c++/4.2/backward/hashtable.h
OLD_FILES+=usr/include/c++/4.2/backward/heap.h
OLD_FILES+=usr/include/c++/4.2/backward/iomanip.h
OLD_FILES+=usr/include/c++/4.2/backward/iostream.h
OLD_FILES+=usr/include/c++/4.2/backward/istream.h
OLD_FILES+=usr/include/c++/4.2/backward/iterator.h
OLD_FILES+=usr/include/c++/4.2/backward/list.h
OLD_FILES+=usr/include/c++/4.2/backward/map.h
OLD_FILES+=usr/include/c++/4.2/backward/multimap.h
OLD_FILES+=usr/include/c++/4.2/backward/multiset.h
OLD_FILES+=usr/include/c++/4.2/backward/new.h
OLD_FILES+=usr/include/c++/4.2/backward/ostream.h
OLD_FILES+=usr/include/c++/4.2/backward/pair.h
OLD_FILES+=usr/include/c++/4.2/backward/queue.h
OLD_FILES+=usr/include/c++/4.2/backward/rope.h
OLD_FILES+=usr/include/c++/4.2/backward/set.h
OLD_FILES+=usr/include/c++/4.2/backward/slist.h
OLD_FILES+=usr/include/c++/4.2/backward/stack.h
OLD_FILES+=usr/include/c++/4.2/backward/stream.h
OLD_FILES+=usr/include/c++/4.2/backward/streambuf.h
OLD_FILES+=usr/include/c++/4.2/backward/strstream
OLD_FILES+=usr/include/c++/4.2/backward/tempbuf.h
OLD_FILES+=usr/include/c++/4.2/backward/tree.h
OLD_FILES+=usr/include/c++/4.2/backward/vector.h
OLD_FILES+=usr/include/c++/4.2/bits/allocator.h
OLD_FILES+=usr/include/c++/4.2/bits/atomic_word.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_file.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_ios.tcc
OLD_FILES+=usr/include/c++/4.2/bits/basic_string.h
OLD_FILES+=usr/include/c++/4.2/bits/basic_string.tcc
OLD_FILES+=usr/include/c++/4.2/bits/boost_concept_check.h
OLD_FILES+=usr/include/c++/4.2/bits/c++allocator.h
OLD_FILES+=usr/include/c++/4.2/bits/c++config.h
OLD_FILES+=usr/include/c++/4.2/bits/c++io.h
OLD_FILES+=usr/include/c++/4.2/bits/c++locale.h
OLD_FILES+=usr/include/c++/4.2/bits/c++locale_internal.h
OLD_FILES+=usr/include/c++/4.2/bits/char_traits.h
OLD_FILES+=usr/include/c++/4.2/bits/cmath.tcc
OLD_FILES+=usr/include/c++/4.2/bits/codecvt.h
OLD_FILES+=usr/include/c++/4.2/bits/compatibility.h
OLD_FILES+=usr/include/c++/4.2/bits/concept_check.h
OLD_FILES+=usr/include/c++/4.2/bits/cpp_type_traits.h
OLD_FILES+=usr/include/c++/4.2/bits/cpu_defines.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_base.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_inline.h
OLD_FILES+=usr/include/c++/4.2/bits/ctype_noninline.h
OLD_FILES+=usr/include/c++/4.2/bits/cxxabi_tweaks.h
OLD_FILES+=usr/include/c++/4.2/bits/deque.tcc
OLD_FILES+=usr/include/c++/4.2/bits/fstream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/functexcept.h
OLD_FILES+=usr/include/c++/4.2/bits/gslice.h
OLD_FILES+=usr/include/c++/4.2/bits/gslice_array.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-default.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-posix.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-single.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr-tpf.h
OLD_FILES+=usr/include/c++/4.2/bits/gthr.h
OLD_FILES+=usr/include/c++/4.2/bits/indirect_array.h
OLD_FILES+=usr/include/c++/4.2/bits/ios_base.h
OLD_FILES+=usr/include/c++/4.2/bits/istream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/list.tcc
OLD_FILES+=usr/include/c++/4.2/bits/locale_classes.h
OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.h
OLD_FILES+=usr/include/c++/4.2/bits/locale_facets.tcc
OLD_FILES+=usr/include/c++/4.2/bits/localefwd.h
OLD_FILES+=usr/include/c++/4.2/bits/mask_array.h
OLD_FILES+=usr/include/c++/4.2/bits/messages_members.h
OLD_FILES+=usr/include/c++/4.2/bits/os_defines.h
OLD_FILES+=usr/include/c++/4.2/bits/ostream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/ostream_insert.h
OLD_FILES+=usr/include/c++/4.2/bits/postypes.h
OLD_FILES+=usr/include/c++/4.2/bits/slice_array.h
OLD_FILES+=usr/include/c++/4.2/bits/sstream.tcc
OLD_FILES+=usr/include/c++/4.2/bits/stl_algo.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_algobase.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_bvector.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_construct.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_deque.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_function.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_heap.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_funcs.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_iterator_base_types.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_list.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_map.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_multimap.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_multiset.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_numeric.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_pair.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_queue.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_raw_storage_iter.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_relops.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_set.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_stack.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_tempbuf.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_tree.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_uninitialized.h
OLD_FILES+=usr/include/c++/4.2/bits/stl_vector.h
OLD_FILES+=usr/include/c++/4.2/bits/stream_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/streambuf.tcc
OLD_FILES+=usr/include/c++/4.2/bits/streambuf_iterator.h
OLD_FILES+=usr/include/c++/4.2/bits/stringfwd.h
OLD_FILES+=usr/include/c++/4.2/bits/time_members.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_after.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.h
OLD_FILES+=usr/include/c++/4.2/bits/valarray_array.tcc
OLD_FILES+=usr/include/c++/4.2/bits/valarray_before.h
OLD_FILES+=usr/include/c++/4.2/bits/vector.tcc
OLD_FILES+=usr/include/c++/4.2/bitset
OLD_FILES+=usr/include/c++/4.2/cassert
OLD_FILES+=usr/include/c++/4.2/cctype
OLD_FILES+=usr/include/c++/4.2/cerrno
OLD_FILES+=usr/include/c++/4.2/cfloat
OLD_FILES+=usr/include/c++/4.2/ciso646
OLD_FILES+=usr/include/c++/4.2/climits
OLD_FILES+=usr/include/c++/4.2/clocale
OLD_FILES+=usr/include/c++/4.2/cmath
OLD_FILES+=usr/include/c++/4.2/complex
OLD_FILES+=usr/include/c++/4.2/csetjmp
OLD_FILES+=usr/include/c++/4.2/csignal
OLD_FILES+=usr/include/c++/4.2/cstdarg
OLD_FILES+=usr/include/c++/4.2/cstddef
OLD_FILES+=usr/include/c++/4.2/cstdio
OLD_FILES+=usr/include/c++/4.2/cstdlib
OLD_FILES+=usr/include/c++/4.2/cstring
OLD_FILES+=usr/include/c++/4.2/ctime
OLD_FILES+=usr/include/c++/4.2/cwchar
OLD_FILES+=usr/include/c++/4.2/cwctype
OLD_FILES+=usr/include/c++/4.2/cxxabi.h
OLD_FILES+=usr/include/c++/4.2/debug/bitset
OLD_FILES+=usr/include/c++/4.2/debug/debug.h
OLD_FILES+=usr/include/c++/4.2/debug/deque
OLD_FILES+=usr/include/c++/4.2/debug/formatter.h
OLD_FILES+=usr/include/c++/4.2/debug/functions.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_map
OLD_FILES+=usr/include/c++/4.2/debug/hash_map.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_multimap.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_multiset.h
OLD_FILES+=usr/include/c++/4.2/debug/hash_set
OLD_FILES+=usr/include/c++/4.2/debug/hash_set.h
OLD_FILES+=usr/include/c++/4.2/debug/list
OLD_FILES+=usr/include/c++/4.2/debug/macros.h
OLD_FILES+=usr/include/c++/4.2/debug/map
OLD_FILES+=usr/include/c++/4.2/debug/map.h
OLD_FILES+=usr/include/c++/4.2/debug/multimap.h
OLD_FILES+=usr/include/c++/4.2/debug/multiset.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_base.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.h
OLD_FILES+=usr/include/c++/4.2/debug/safe_iterator.tcc
OLD_FILES+=usr/include/c++/4.2/debug/safe_sequence.h
OLD_FILES+=usr/include/c++/4.2/debug/set
OLD_FILES+=usr/include/c++/4.2/debug/set.h
OLD_FILES+=usr/include/c++/4.2/debug/string
OLD_FILES+=usr/include/c++/4.2/debug/vector
OLD_FILES+=usr/include/c++/4.2/deque
OLD_FILES+=usr/include/c++/4.2/exception
OLD_FILES+=usr/include/c++/4.2/exception_defines.h
OLD_FILES+=usr/include/c++/4.2/ext/algorithm
OLD_FILES+=usr/include/c++/4.2/ext/array_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/atomicity.h
OLD_FILES+=usr/include/c++/4.2/ext/bitmap_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/codecvt_specializations.h
OLD_FILES+=usr/include/c++/4.2/ext/concurrence.h
OLD_FILES+=usr/include/c++/4.2/ext/debug_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/functional
OLD_FILES+=usr/include/c++/4.2/ext/hash_fun.h
OLD_FILES+=usr/include/c++/4.2/ext/hash_map
OLD_FILES+=usr/include/c++/4.2/ext/hash_set
OLD_FILES+=usr/include/c++/4.2/ext/hashtable.h
OLD_FILES+=usr/include/c++/4.2/ext/iterator
OLD_FILES+=usr/include/c++/4.2/ext/malloc_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/memory
OLD_FILES+=usr/include/c++/4.2/ext/mt_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/new_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/numeric
OLD_FILES+=usr/include/c++/4.2/ext/numeric_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/assoc_container.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/basic_tree_policy_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/null_node_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_tree_policy/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/basic_types.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/bin_search_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/cond_key_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/point_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/r_erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/rotate_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/bin_search_tree_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/binary_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_cmp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/entry_pred.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/resize_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binary_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/binomial_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/binomial_heap_base_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/binomial_heap_base_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cc_ht_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cmp_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/cond_key_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/debug_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/entry_list_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/erase_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/find_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/insert_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/resize_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/size_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cc_hash_table_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/cond_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/container_base_dispatch.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/eq_by_less.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/eq_fn/hash_eq_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/constructor_destructor_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/debug_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/erase_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/find_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/gp_ht_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/insert_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/iterator_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_no_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/resize_store_hash_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/gp_hash_table_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mask_range_hashing_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/direct_mod_range_hashing_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/linear_probe_fn_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mask_based_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/mod_based_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/probe_fn_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/quadratic_probe_fn_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_hash_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/ranged_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_range_hashing.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_hash_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/hash_fn/sample_ranged_probe_fn.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/left_child_next_sibling_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/null_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/left_child_next_sibling_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/constructor_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/entry_metadata_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/lu_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_map_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_metadata.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/counter_lu_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/mtf_lu_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/list_update_policy/sample_update_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/map_debug_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/cond_dtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/ov_tree_map_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/ov_tree_map_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/pairing_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pairing_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/child_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/cond_dtor_entry_dealtor.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/const_child_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/head.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/insert_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/internal_node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/iterators_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/leaf.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/node_metadata_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/pat_trie_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/point_iterators.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/policy_access_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/r_erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/rotate_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/split_join_branch_bag.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/synth_e_access_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/pat_trie_/update_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/priority_queue_base_dispatch.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/rb_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rb_tree_map_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/rc_binomial_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/rc_binomial_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/cc_hash_max_collision_check_resize_trigger_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_exponential_size_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_load_check_resize_trigger_size_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_prime_size_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/hash_standard_resize_policy_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_resize_trigger.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/resize_policy/sample_size_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/info_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/node.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/splay_tree_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/splay_tree_/traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/standard_policies.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/constructors_destructor_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/debug_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/erase_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/find_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/insert_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/split_join_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/thin_heap_.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/thin_heap_/trace_fn_imps.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/node_metadata_selector.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/null_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/order_statistics_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_policy/sample_tree_node_update.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/tree_trace_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/node_metadata_selector.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/null_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/order_statistics_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/prefix_search_node_update_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_e_access_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/sample_trie_node_update.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/string_trie_e_access_traits_imp.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/trie_policy/trie_policy_base.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/type_utils.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/types_traits.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/const_point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/detail/unordered_iterator/point_iterator.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/exception.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/hash_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/list_update_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/priority_queue.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tag_and_trait.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/tree_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pb_ds/trie_policy.hpp
OLD_FILES+=usr/include/c++/4.2/ext/pod_char_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/pool_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/rb_tree
OLD_FILES+=usr/include/c++/4.2/ext/rc_string_base.h
OLD_FILES+=usr/include/c++/4.2/ext/rope
OLD_FILES+=usr/include/c++/4.2/ext/ropeimpl.h
OLD_FILES+=usr/include/c++/4.2/ext/slist
OLD_FILES+=usr/include/c++/4.2/ext/sso_string_base.h
OLD_FILES+=usr/include/c++/4.2/ext/stdio_filebuf.h
OLD_FILES+=usr/include/c++/4.2/ext/stdio_sync_filebuf.h
OLD_FILES+=usr/include/c++/4.2/ext/throw_allocator.h
OLD_FILES+=usr/include/c++/4.2/ext/type_traits.h
OLD_FILES+=usr/include/c++/4.2/ext/typelist.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring.tcc
OLD_FILES+=usr/include/c++/4.2/ext/vstring_fwd.h
OLD_FILES+=usr/include/c++/4.2/ext/vstring_util.h
OLD_FILES+=usr/include/c++/4.2/fstream
OLD_FILES+=usr/include/c++/4.2/functional
OLD_FILES+=usr/include/c++/4.2/iomanip
OLD_FILES+=usr/include/c++/4.2/ios
OLD_FILES+=usr/include/c++/4.2/iosfwd
OLD_FILES+=usr/include/c++/4.2/iostream
OLD_FILES+=usr/include/c++/4.2/istream
OLD_FILES+=usr/include/c++/4.2/iterator
OLD_FILES+=usr/include/c++/4.2/limits
OLD_FILES+=usr/include/c++/4.2/list
OLD_FILES+=usr/include/c++/4.2/locale
OLD_FILES+=usr/include/c++/4.2/map
OLD_FILES+=usr/include/c++/4.2/memory
OLD_FILES+=usr/include/c++/4.2/new
OLD_FILES+=usr/include/c++/4.2/numeric
OLD_FILES+=usr/include/c++/4.2/ostream
OLD_FILES+=usr/include/c++/4.2/queue
OLD_FILES+=usr/include/c++/4.2/set
OLD_FILES+=usr/include/c++/4.2/sstream
OLD_FILES+=usr/include/c++/4.2/stack
OLD_FILES+=usr/include/c++/4.2/stdexcept
OLD_FILES+=usr/include/c++/4.2/streambuf
OLD_FILES+=usr/include/c++/4.2/string
OLD_FILES+=usr/include/c++/4.2/tr1/array
OLD_FILES+=usr/include/c++/4.2/tr1/bind_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/bind_repeat.h
OLD_FILES+=usr/include/c++/4.2/tr1/boost_shared_ptr.h
OLD_FILES+=usr/include/c++/4.2/tr1/cctype
OLD_FILES+=usr/include/c++/4.2/tr1/cfenv
OLD_FILES+=usr/include/c++/4.2/tr1/cfloat
OLD_FILES+=usr/include/c++/4.2/tr1/cinttypes
OLD_FILES+=usr/include/c++/4.2/tr1/climits
OLD_FILES+=usr/include/c++/4.2/tr1/cmath
OLD_FILES+=usr/include/c++/4.2/tr1/common.h
OLD_FILES+=usr/include/c++/4.2/tr1/complex
OLD_FILES+=usr/include/c++/4.2/tr1/cstdarg
OLD_FILES+=usr/include/c++/4.2/tr1/cstdbool
OLD_FILES+=usr/include/c++/4.2/tr1/cstdint
OLD_FILES+=usr/include/c++/4.2/tr1/cstdio
OLD_FILES+=usr/include/c++/4.2/tr1/cstdlib
OLD_FILES+=usr/include/c++/4.2/tr1/ctgmath
OLD_FILES+=usr/include/c++/4.2/tr1/ctime
OLD_FILES+=usr/include/c++/4.2/tr1/ctype.h
OLD_FILES+=usr/include/c++/4.2/tr1/cwchar
OLD_FILES+=usr/include/c++/4.2/tr1/cwctype
OLD_FILES+=usr/include/c++/4.2/tr1/fenv.h
OLD_FILES+=usr/include/c++/4.2/tr1/float.h
OLD_FILES+=usr/include/c++/4.2/tr1/functional
OLD_FILES+=usr/include/c++/4.2/tr1/functional_hash.h
OLD_FILES+=usr/include/c++/4.2/tr1/functional_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/hashtable
OLD_FILES+=usr/include/c++/4.2/tr1/hashtable_policy.h
OLD_FILES+=usr/include/c++/4.2/tr1/inttypes.h
OLD_FILES+=usr/include/c++/4.2/tr1/limits.h
OLD_FILES+=usr/include/c++/4.2/tr1/math.h
OLD_FILES+=usr/include/c++/4.2/tr1/memory
OLD_FILES+=usr/include/c++/4.2/tr1/mu_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/random
OLD_FILES+=usr/include/c++/4.2/tr1/random.tcc
OLD_FILES+=usr/include/c++/4.2/tr1/ref_fwd.h
OLD_FILES+=usr/include/c++/4.2/tr1/ref_wrap_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/repeat.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdarg.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdbool.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdint.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdio.h
OLD_FILES+=usr/include/c++/4.2/tr1/stdlib.h
OLD_FILES+=usr/include/c++/4.2/tr1/tgmath.h
OLD_FILES+=usr/include/c++/4.2/tr1/tuple
OLD_FILES+=usr/include/c++/4.2/tr1/tuple_defs.h
OLD_FILES+=usr/include/c++/4.2/tr1/tuple_iterate.h
OLD_FILES+=usr/include/c++/4.2/tr1/type_traits
OLD_FILES+=usr/include/c++/4.2/tr1/type_traits_fwd.h
OLD_FILES+=usr/include/c++/4.2/tr1/unordered_map
OLD_FILES+=usr/include/c++/4.2/tr1/unordered_set
OLD_FILES+=usr/include/c++/4.2/tr1/utility
OLD_FILES+=usr/include/c++/4.2/tr1/wchar.h
OLD_FILES+=usr/include/c++/4.2/tr1/wctype.h
OLD_FILES+=usr/include/c++/4.2/typeinfo
OLD_FILES+=usr/include/c++/4.2/utility
OLD_FILES+=usr/include/c++/4.2/valarray
OLD_FILES+=usr/include/c++/4.2/vector
OLD_FILES+=usr/lib/libstdc++.a
OLD_FILES+=usr/lib/libstdc++.so
OLD_LIBS+=usr/lib/libstdc++.so.6
OLD_FILES+=usr/lib/libstdc++_p.a
OLD_FILES+=usr/lib/libsupc++.a
OLD_FILES+=usr/lib/libsupc++.so
OLD_LIBS+=usr/lib/libsupc++.so.1
OLD_FILES+=usr/lib/libsupc++_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libstdc++.a
OLD_FILES+=usr/lib32/libstdc++.so
OLD_LIBS+=usr/lib32/libstdc++.so.6
OLD_FILES+=usr/lib32/libstdc++_p.a
OLD_FILES+=usr/lib32/libsupc++.a
OLD_FILES+=usr/lib32/libsupc++.so
OLD_LIBS+=usr/lib32/libsupc++.so.1
OLD_FILES+=usr/lib32/libsupc++_p.a
.endif
OLD_FILES+=usr/libexec/cc1plus
.endif
.if ${MK_DICT} == no
OLD_FILES+=usr/share/dict/README
OLD_FILES+=usr/share/dict/eign
OLD_FILES+=usr/share/dict/freebsd
OLD_FILES+=usr/share/dict/propernames
OLD_FILES+=usr/share/dict/web2
OLD_FILES+=usr/share/dict/web2a
OLD_FILES+=usr/share/dict/words
OLD_DIRS+=usr/share/dict
.endif
.if ${MK_DMAGENT} == no
OLD_FILES+=etc/dma/dma.conf
OLD_FILES+=usr/libexec/dma
OLD_FILES+=usr/libexec/dma-mbox-create
OLD_FILES+=usr/share/man/man8/dma.8.gz
OLD_FILES+=usr/share/examples/dma/mailer.conf
.endif
.if ${MK_EE} == no
OLD_FILES+=usr/bin/edit
OLD_FILES+=usr/bin/ee
OLD_FILES+=usr/bin/ree
OLD_FILES+=usr/share/man/man1/edit.1.gz
OLD_FILES+=usr/share/man/man1/ee.1.gz
OLD_FILES+=usr/share/man/man1/ree.1.gz
OLD_FILES+=usr/share/nls/C/ee.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/pl_PL.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/ee.cat
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/ee.cat
.endif
.if ${MK_EXAMPLES} == no
OLD_DIRS+=usr/share/examples
OLD_DIRS+=usr/share/examples/BSD_daemon
OLD_DIRS+=usr/share/examples/FreeBSD_version
OLD_DIRS+=usr/share/examples/IPv6
OLD_DIRS+=usr/share/examples/bootforth
OLD_DIRS+=usr/share/examples/bsdconfig
OLD_DIRS+=usr/share/examples/csh
OLD_DIRS+=usr/share/examples/diskless
OLD_DIRS+=usr/share/examples/dma
OLD_DIRS+=usr/share/examples/drivers
OLD_DIRS+=usr/share/examples/etc
OLD_DIRS+=usr/share/examples/etc/defaults
OLD_DIRS+=usr/share/examples/find_interface
OLD_DIRS+=usr/share/examples/hast
OLD_DIRS+=usr/share/examples/ibcs2
OLD_DIRS+=usr/share/examples/indent
OLD_DIRS+=usr/share/examples/ipfw
OLD_DIRS+=usr/share/examples/jails
OLD_DIRS+=usr/share/examples/kld
OLD_DIRS+=usr/share/examples/kld/cdev
OLD_DIRS+=usr/share/examples/kld/cdev/module
OLD_DIRS+=usr/share/examples/kld/cdev/test
OLD_DIRS+=usr/share/examples/kld/dyn_sysctl
OLD_DIRS+=usr/share/examples/kld/firmware
OLD_DIRS+=usr/share/examples/kld/firmware/fwconsumer
OLD_DIRS+=usr/share/examples/kld/firmware/fwimage
OLD_DIRS+=usr/share/examples/kld/khelp
OLD_DIRS+=usr/share/examples/kld/syscall
OLD_DIRS+=usr/share/examples/kld/syscall/module
OLD_DIRS+=usr/share/examples/kld/syscall/test
OLD_DIRS+=usr/share/examples/libusb20
OLD_DIRS+=usr/share/examples/libvgl
OLD_DIRS+=usr/share/examples/mdoc
OLD_DIRS+=usr/share/examples/netgraph
OLD_DIRS+=usr/share/examples/perfmon
OLD_DIRS+=usr/share/examples/ppi
OLD_DIRS+=usr/share/examples/ppp
OLD_DIRS+=usr/share/examples/printing
OLD_DIRS+=usr/share/examples/scsi_target
OLD_DIRS+=usr/share/examples/ses
OLD_DIRS+=usr/share/examples/ses/getencstat
OLD_DIRS+=usr/share/examples/ses/sesd
OLD_DIRS+=usr/share/examples/ses/setencstat
OLD_DIRS+=usr/share/examples/ses/setobjstat
OLD_DIRS+=usr/share/examples/ses/srcs
OLD_DIRS+=usr/share/examples/smbfs
OLD_DIRS+=usr/share/examples/smbfs/print
OLD_DIRS+=usr/share/examples/sunrpc
OLD_DIRS+=usr/share/examples/sunrpc/dir
OLD_DIRS+=usr/share/examples/sunrpc/msg
OLD_DIRS+=usr/share/examples/sunrpc/sort
OLD_DIRS+=usr/share/examples/tcsh
OLD_DIRS+=usr/share/examples/uefisign
OLD_DIRS+=usr/share/examples/ypldap
.endif
.if ${MK_FINGER} == no
OLD_FILES+=usr/bin/finger
OLD_FILES+=usr/share/man/man1/finger.1.gz
OLD_FILES+=usr/share/man/man5/finger.conf.5.gz
OLD_FILES+=usr/libexec/fingerd
OLD_FILES+=usr/share/man/man8/fingerd.8.gz
.endif
.if ${MK_FLOPPY} == no
OLD_FILES+=usr/sbin/fdcontrol
OLD_FILES+=usr/sbin/fdformat
OLD_FILES+=usr/sbin/fdread
OLD_FILES+=usr/sbin/fdwrite
OLD_FILES+=usr/share/man/man1/fdformat.1.gz
OLD_FILES+=usr/share/man/man1/fdread.1.gz
OLD_FILES+=usr/share/man/man1/fdwrite.1.gz
OLD_FILES+=usr/share/man/man8/fdcontrol.8.gz
.endif
.if ${MK_FORTH} == no
OLD_FILES+=usr/share/man/man8/beastie.4th.8.gz
OLD_FILES+=usr/share/man/man8/brand.4th.8.gz
OLD_FILES+=usr/share/man/man8/check-password.4th.8.gz
OLD_FILES+=usr/share/man/man8/color.4th.8.gz
OLD_FILES+=usr/share/man/man8/delay.4th.8.gz
OLD_FILES+=usr/share/man/man8/loader.4th.8.gz
OLD_FILES+=usr/share/man/man8/menu.4th.8.gz
OLD_FILES+=usr/share/man/man8/menusets.4th.8.gz
OLD_FILES+=usr/share/man/man8/version.4th.8.gz
.endif
.if ${MK_FREEBSD_UPDATE} == no
OLD_FILES+=etc/freebsd-update.conf
OLD_FILES+=usr/sbin/freebsd-update
OLD_FILES+=usr/share/examples/etc/freebsd-update.conf
OLD_FILES+=usr/share/man/man5/freebsd-update.conf.5.gz
OLD_FILES+=usr/share/man/man8/freebsd-update.8.gz
.endif
.if ${MK_GAMES} == no
OLD_FILES+=usr/bin/caesar
OLD_FILES+=usr/bin/factor
OLD_FILES+=usr/bin/fortune
OLD_FILES+=usr/bin/grdc
OLD_FILES+=usr/bin/morse
OLD_FILES+=usr/bin/number
OLD_FILES+=usr/bin/pom
OLD_FILES+=usr/bin/primes
OLD_FILES+=usr/bin/random
OLD_FILES+=usr/bin/rot13
OLD_FILES+=usr/bin/strfile
OLD_FILES+=usr/bin/unstr
OLD_FILES+=usr/share/games/fortune/fortunes
OLD_FILES+=usr/share/games/fortune/fortunes.dat
OLD_FILES+=usr/share/games/fortune/freebsd-tips
OLD_FILES+=usr/share/games/fortune/freebsd-tips.dat
OLD_FILES+=usr/share/games/fortune/gerrold.limerick
OLD_FILES+=usr/share/games/fortune/gerrold.limerick.dat
OLD_FILES+=usr/share/games/fortune/limerick
OLD_FILES+=usr/share/games/fortune/limerick.dat
OLD_FILES+=usr/share/games/fortune/murphy
OLD_FILES+=usr/share/games/fortune/murphy-o
OLD_FILES+=usr/share/games/fortune/murphy-o.dat
OLD_FILES+=usr/share/games/fortune/murphy.dat
OLD_FILES+=usr/share/games/fortune/startrek
OLD_FILES+=usr/share/games/fortune/startrek.dat
OLD_FILES+=usr/share/games/fortune/zippy
OLD_FILES+=usr/share/games/fortune/zippy.dat
OLD_DIRS+=usr/share/games/fortune
OLD_DIRS+=usr/share/games
OLD_FILES+=usr/share/man/man6/caesar.6.gz
OLD_FILES+=usr/share/man/man6/factor.6.gz
OLD_FILES+=usr/share/man/man6/fortune.6.gz
OLD_FILES+=usr/share/man/man6/grdc.6.gz
OLD_FILES+=usr/share/man/man6/morse.6.gz
OLD_FILES+=usr/share/man/man6/number.6.gz
OLD_FILES+=usr/share/man/man6/pom.6.gz
OLD_FILES+=usr/share/man/man6/primes.6.gz
OLD_FILES+=usr/share/man/man6/random.6.gz
OLD_FILES+=usr/share/man/man6/rot13.6.gz
OLD_FILES+=usr/share/man/man8/strfile.8.gz
OLD_FILES+=usr/share/man/man8/unstr.8.gz
.endif
.if ${MK_GCC} == no
OLD_FILES+=usr/bin/g++
OLD_FILES+=usr/bin/gcc
OLD_FILES+=usr/bin/gcov
OLD_FILES+=usr/bin/gcpp
OLD_FILES+=usr/bin/gperf
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "i386"
OLD_FILES+=usr/include/gcc/4.2/__wmmintrin_aes.h
OLD_FILES+=usr/include/gcc/4.2/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/gcc/4.2/ammintrin.h
OLD_FILES+=usr/include/gcc/4.2/emmintrin.h
OLD_FILES+=usr/include/gcc/4.2/mm3dnow.h
OLD_FILES+=usr/include/gcc/4.2/mm_malloc.h
OLD_FILES+=usr/include/gcc/4.2/mmintrin.h
OLD_FILES+=usr/include/gcc/4.2/pmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/tmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/wmmintrin.h
OLD_FILES+=usr/include/gcc/4.2/xmmintrin.h
.elif ${TARGET_ARCH} == "arm"
OLD_FILES+=usr/include/gcc/4.2/mmintrin.h
.elif ${TARGET_ARCH} == "powerpc" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/include/gcc/4.2/altivec.h
OLD_FILES+=usr/include/gcc/4.2/ppc-asm.h
OLD_FILES+=usr/include/gcc/4.2/spe.h
.endif
OLD_FILES+=usr/include/omp.h
OLD_FILES+=usr/lib/libgcov.a
OLD_FILES+=usr/lib/libgomp.a
OLD_FILES+=usr/lib/libgomp.so
OLD_LIBS+=usr/lib/libgomp.so.1
OLD_FILES+=usr/lib/libgomp_p.a
OLD_FILES+=usr/lib32/libgcov.a
OLD_FILES+=usr/lib32/libgomp.a
OLD_FILES+=usr/lib32/libgomp.so
OLD_LIBS+=usr/lib32/libgomp.so.1
OLD_FILES+=usr/lib32/libgomp_p.a
OLD_FILES+=usr/libexec/cc1
OLD_FILES+=usr/libexec/cc1plus
OLD_FILES+=usr/share/man/man1/g++.1.gz
OLD_FILES+=usr/share/man/man1/gcc.1.gz
OLD_FILES+=usr/share/man/man1/gcov.1.gz
OLD_FILES+=usr/share/man/man1/gcpp.1.gz
OLD_FILES+=usr/share/man/man1/gperf.1.gz
OLD_FILES+=usr/share/man/man7/gperf.7.gz
.endif
.if ${MK_GCOV} == no
OLD_FILES+=usr/bin/gcov
OLD_FILES+=usr/share/man/man1/gcov.1.gz
.endif
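# gdb is installed under /usr/bin or /usr/libexec depending on
# MK_GDB_LIBEXEC, so the two blocks below cover both layouts.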
.if ${MK_GDB} == no || ${MK_GDB_LIBEXEC} == yes
OLD_FILES+=usr/bin/gdb
OLD_FILES+=usr/bin/gdbserver
OLD_FILES+=usr/bin/kgdb
OLD_FILES+=usr/share/man/man1/gdb.1.gz
OLD_FILES+=usr/share/man/man1/gdbserver.1.gz
OLD_FILES+=usr/share/man/man1/kgdb.1.gz
.endif
.if ${MK_GDB} == no || ${MK_GDB_LIBEXEC} == no
OLD_FILES+=usr/libexec/gdb
OLD_FILES+=usr/libexec/kgdb
.endif
.if ${MK_GPIO} == no
OLD_FILES+=usr/include/libgpio.h
OLD_FILES+=usr/lib/libgpio.a
OLD_FILES+=usr/lib/libgpio.so
OLD_LIBS+=usr/lib/libgpio.so.0
OLD_FILES+=usr/lib/libgpio_p.a
OLD_FILES+=usr/lib32/libgpio.a
OLD_FILES+=usr/lib32/libgpio.so
OLD_LIBS+=usr/lib32/libgpio.so.0
OLD_FILES+=usr/lib32/libgpio_p.a
OLD_FILES+=usr/sbin/gpioctl
OLD_FILES+=usr/share/man/man3/gpio.3.gz
OLD_FILES+=usr/share/man/man3/gpio_close.3.gz
OLD_FILES+=usr/share/man/man3/gpio_open.3.gz
OLD_FILES+=usr/share/man/man3/gpio_open_device.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_config.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_get.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_high.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_input.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_invin.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_invout.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_list.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_low.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_opendrain.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_output.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pulldown.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pullup.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pulsate.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_pushpull.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_set.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/gpio_pin_tristate.3.gz
OLD_FILES+=usr/share/man/man8/gpioctl.8.gz
.endif
.if ${MK_GNU_DIFF} == no
OLD_FILES+=usr/bin/diff3
OLD_FILES+=usr/share/man/man1/diff3.1.gz
.endif
.if ${MK_GNU_GREP} == no
OLD_FILES+=usr/bin/gnugrep
OLD_FILES+=usr/share/man/man1/gnugrep.1.gz
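# The unprefixed grep names are provided by either implementation, so they
# only become obsolete once BSD grep is disabled as well.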
.if ${MK_BSD_GREP} == no
OLD_FILES+=usr/bin/bzgrep
OLD_FILES+=usr/bin/bzegrep
OLD_FILES+=usr/bin/bzfgrep
OLD_FILES+=usr/bin/egrep
OLD_FILES+=usr/bin/fgrep
OLD_FILES+=usr/bin/grep
OLD_FILES+=usr/bin/zegrep
OLD_FILES+=usr/bin/zfgrep
OLD_FILES+=usr/bin/zgrep
OLD_FILES+=usr/share/man/man1/bzegrep.1.gz
OLD_FILES+=usr/share/man/man1/bzfgrep.1.gz
OLD_FILES+=usr/share/man/man1/bzgrep.1.gz
OLD_FILES+=usr/share/man/man1/egrep.1.gz
OLD_FILES+=usr/share/man/man1/fgrep.1.gz
OLD_FILES+=usr/share/man/man1/grep.1.gz
OLD_FILES+=usr/share/man/man1/zegrep.1.gz
OLD_FILES+=usr/share/man/man1/zfgrep.1.gz
OLD_FILES+=usr/share/man/man1/zgrep.1.gz
.endif
.endif
.if ${MK_GSSAPI} == no
OLD_FILES+=usr/include/gssapi/gssapi.h
OLD_DIRS+=usr/include/gssapi
OLD_FILES+=usr/include/gssapi.h
OLD_FILES+=usr/lib/libgssapi.a
OLD_FILES+=usr/lib/libgssapi.so
OLD_LIBS+=usr/lib/libgssapi.so.10
OLD_FILES+=usr/lib/libgssapi_p.a
OLD_FILES+=usr/lib/librpcsec_gss.a
OLD_FILES+=usr/lib/librpcsec_gss.so
OLD_LIBS+=usr/lib/librpcsec_gss.so.1
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libgssapi.a
OLD_FILES+=usr/lib32/libgssapi.so
OLD_LIBS+=usr/lib32/libgssapi.so.10
OLD_FILES+=usr/lib32/libgssapi_p.a
OLD_FILES+=usr/lib32/librpcsec_gss.a
OLD_FILES+=usr/lib32/librpcsec_gss.so
OLD_LIBS+=usr/lib32/librpcsec_gss.so.1
.endif
OLD_FILES+=usr/sbin/gssd
OLD_FILES+=usr/share/man/man3/gss_accept_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_acquire_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_add_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_add_oid_set_member.3.gz
OLD_FILES+=usr/share/man/man3/gss_canonicalize_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_compare_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_context_time.3.gz
OLD_FILES+=usr/share/man/man3/gss_create_empty_oid_set.3.gz
OLD_FILES+=usr/share/man/man3/gss_delete_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_display_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_display_status.3.gz
OLD_FILES+=usr/share/man/man3/gss_duplicate_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_export_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_export_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_get_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_import_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_import_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_indicate_mechs.3.gz
OLD_FILES+=usr/share/man/man3/gss_init_sec_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_context.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_cred_by_mech.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_mechs_for_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_inquire_names_for_mech.3.gz
OLD_FILES+=usr/share/man/man3/gss_process_context_token.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_buffer.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_cred.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_name.3.gz
OLD_FILES+=usr/share/man/man3/gss_release_oid_set.3.gz
OLD_FILES+=usr/share/man/man3/gss_seal.3.gz
OLD_FILES+=usr/share/man/man3/gss_sign.3.gz
OLD_FILES+=usr/share/man/man3/gss_test_oid_set_member.3.gz
OLD_FILES+=usr/share/man/man3/gss_unseal.3.gz
OLD_FILES+=usr/share/man/man3/gss_unwrap.3.gz
OLD_FILES+=usr/share/man/man3/gss_verify.3.gz
OLD_FILES+=usr/share/man/man3/gss_verify_mic.3.gz
OLD_FILES+=usr/share/man/man3/gss_wrap.3.gz
OLD_FILES+=usr/share/man/man3/gss_wrap_size_limit.3.gz
OLD_FILES+=usr/share/man/man3/gssapi.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_error.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_mech_info.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_mechanisms.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_principal_name.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_get_versions.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_getcred.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_is_installed.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_max_data_length.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_mech_to_oid.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_oid_to_mech.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_qop_to_num.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_seccreate.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_set_callback.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_set_defaults.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_set_svc_name.3.gz
OLD_FILES+=usr/share/man/man3/rpc_gss_svc_max_data_length.3.gz
OLD_FILES+=usr/share/man/man3/rpcsec_gss.3.gz
OLD_FILES+=usr/share/man/man5/mech.5.gz
OLD_FILES+=usr/share/man/man5/qop.5.gz
OLD_FILES+=usr/share/man/man8/gssd.8.gz
.endif
.if ${MK_HAST} == no
OLD_FILES+=sbin/hastctl
OLD_FILES+=sbin/hastd
OLD_FILES+=usr/share/examples/hast/ucarp.sh
OLD_FILES+=usr/share/examples/hast/ucarp_down.sh
OLD_FILES+=usr/share/examples/hast/ucarp_up.sh
OLD_FILES+=usr/share/examples/hast/vip-down.sh
OLD_FILES+=usr/share/examples/hast/vip-up.sh
OLD_FILES+=usr/share/man/man5/hast.conf.5.gz
OLD_FILES+=usr/share/man/man8/hastctl.8.gz
OLD_FILES+=usr/share/man/man8/hastd.8.gz
OLD_DIRS+=usr/share/examples/hast
# bsnmp
OLD_FILES+=usr/lib/snmp_hast.so
OLD_LIBS+=usr/lib/snmp_hast.so.6
OLD_FILES+=usr/share/man/man3/snmp_hast.3.gz
OLD_FILES+=usr/share/snmp/defs/hast_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-HAST-MIB.txt
.endif
.if ${MK_HESIOD} == no
OLD_FILES+=usr/bin/hesinfo
OLD_FILES+=usr/include/hesiod.h
OLD_FILES+=usr/share/man/man1/hesinfo.1.gz
OLD_FILES+=usr/share/man/man3/hesiod.3.gz
OLD_FILES+=usr/share/man/man5/hesiod.conf.5.gz
.endif
.if ${MK_HTML} == no
OLD_FILES+=usr/share/doc/ncurses/hackguide.html
OLD_FILES+=usr/share/doc/ncurses/ncurses-intro.html
OLD_DIRS+=usr/share/doc/ncurses
OLD_FILES+=usr/share/doc/ntp/accopt.html
OLD_FILES+=usr/share/doc/ntp/assoc.html
OLD_FILES+=usr/share/doc/ntp/audio.html
OLD_FILES+=usr/share/doc/ntp/authopt.html
OLD_FILES+=usr/share/doc/ntp/build.html
OLD_FILES+=usr/share/doc/ntp/clockopt.html
OLD_FILES+=usr/share/doc/ntp/config.html
OLD_FILES+=usr/share/doc/ntp/confopt.html
OLD_FILES+=usr/share/doc/ntp/copyright.html
OLD_FILES+=usr/share/doc/ntp/debug.html
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/extern.html
OLD_FILES+=usr/share/doc/ntp/hints.html
OLD_FILES+=usr/share/doc/ntp/howto.html
OLD_FILES+=usr/share/doc/ntp/index.html
OLD_FILES+=usr/share/doc/ntp/kern.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/miscopt.html
OLD_FILES+=usr/share/doc/ntp/monopt.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/ntpd.html
OLD_FILES+=usr/share/doc/ntp/ntpdate.html
OLD_FILES+=usr/share/doc/ntp/ntpdc.html
OLD_FILES+=usr/share/doc/ntp/ntpq.html
OLD_FILES+=usr/share/doc/ntp/ntptime.html
OLD_FILES+=usr/share/doc/ntp/ntptrace.html
OLD_FILES+=usr/share/doc/ntp/parsedata.html
OLD_FILES+=usr/share/doc/ntp/parsenew.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/doc/ntp/pps.html
OLD_FILES+=usr/share/doc/ntp/prefer.html
OLD_FILES+=usr/share/doc/ntp/quick.html
OLD_FILES+=usr/share/doc/ntp/rdebug.html
OLD_FILES+=usr/share/doc/ntp/refclock.html
OLD_FILES+=usr/share/doc/ntp/release.html
OLD_FILES+=usr/share/doc/ntp/tickadj.html
.endif
.if ${MK_ICONV} == no
OLD_FILES+=usr/bin/iconv
OLD_FILES+=usr/bin/mkcsmapper
OLD_FILES+=usr/bin/mkesdb
OLD_FILES+=usr/include/_libiconv_compat.h
OLD_FILES+=usr/include/iconv.h
OLD_FILES+=usr/share/man/man1/iconv.1.gz
OLD_FILES+=usr/share/man/man1/mkcsmapper.1.gz
OLD_FILES+=usr/share/man/man1/mkesdb.1.gz
OLD_FILES+=usr/share/man/man3/__iconv.3.gz
OLD_FILES+=usr/share/man/man3/__iconv_free_list.3.gz
OLD_FILES+=usr/share/man/man3/__iconv_get_list.3.gz
OLD_FILES+=usr/share/man/man3/iconv.3.gz
OLD_FILES+=usr/share/man/man3/iconv_canonicalize.3.gz
OLD_FILES+=usr/share/man/man3/iconv_close.3.gz
OLD_FILES+=usr/share/man/man3/iconv_open.3.gz
OLD_FILES+=usr/share/man/man3/iconv_open_into.3.gz
OLD_FILES+=usr/share/man/man3/iconvctl.3.gz
OLD_FILES+=usr/share/man/man3/iconvlist.3.gz
OLD_DIRS+=usr/share/i18n
OLD_DIRS+=usr/share/i18n/esdb
OLD_DIRS+=usr/share/i18n/esdb/ISO-2022
OLD_DIRS+=usr/share/i18n/esdb/BIG5
OLD_DIRS+=usr/share/i18n/esdb/MISC
OLD_DIRS+=usr/share/i18n/esdb/TCVN
OLD_DIRS+=usr/share/i18n/esdb/EBCDIC
OLD_DIRS+=usr/share/i18n/esdb/ISO-8859
OLD_DIRS+=usr/share/i18n/esdb/GEORGIAN
OLD_DIRS+=usr/share/i18n/esdb/AST
OLD_DIRS+=usr/share/i18n/esdb/KAZAKH
OLD_DIRS+=usr/share/i18n/esdb/APPLE
OLD_DIRS+=usr/share/i18n/esdb/EUC
OLD_DIRS+=usr/share/i18n/esdb/CP
OLD_DIRS+=usr/share/i18n/esdb/DEC
OLD_DIRS+=usr/share/i18n/esdb/UTF
OLD_DIRS+=usr/share/i18n/esdb/GB
OLD_DIRS+=usr/share/i18n/esdb/ISO646
OLD_DIRS+=usr/share/i18n/esdb/KOI
OLD_DIRS+=usr/share/i18n/csmapper
OLD_DIRS+=usr/share/i18n/csmapper/KAZAKH
OLD_DIRS+=usr/share/i18n/csmapper/CNS
OLD_DIRS+=usr/share/i18n/csmapper/BIG5
OLD_DIRS+=usr/share/i18n/csmapper/JIS
OLD_DIRS+=usr/share/i18n/csmapper/KOI
OLD_DIRS+=usr/share/i18n/csmapper/TCVN
OLD_DIRS+=usr/share/i18n/csmapper/MISC
OLD_DIRS+=usr/share/i18n/csmapper/EBCDIC
OLD_DIRS+=usr/share/i18n/csmapper/ISO646
OLD_DIRS+=usr/share/i18n/csmapper/CP
OLD_DIRS+=usr/share/i18n/csmapper/GEORGIAN
OLD_DIRS+=usr/share/i18n/csmapper/ISO-8859
OLD_DIRS+=usr/share/i18n/csmapper/AST
OLD_DIRS+=usr/share/i18n/csmapper/APPLE
OLD_DIRS+=usr/share/i18n/csmapper/KS
OLD_DIRS+=usr/share/i18n/csmapper/GB
.endif
.if ${MK_INET6} == no
OLD_FILES+=sbin/ping6
OLD_FILES+=sbin/rtsol
OLD_FILES+=usr/sbin/ip6addrctl
OLD_FILES+=usr/sbin/mld6query
OLD_FILES+=usr/sbin/ndp
OLD_FILES+=usr/sbin/rip6query
OLD_FILES+=usr/sbin/route6d
OLD_FILES+=usr/sbin/rrenumd
OLD_FILES+=usr/sbin/rtadvctl
OLD_FILES+=usr/sbin/rtadvd
OLD_FILES+=usr/sbin/rtsold
OLD_FILES+=usr/sbin/traceroute6
OLD_FILES+=usr/share/doc/IPv6/IMPLEMENTATION
OLD_FILES+=usr/share/man/man5/rrenumd.conf.5.gz
OLD_FILES+=usr/share/man/man5/rtadvd.conf.5.gz
OLD_FILES+=usr/share/man/man8/ip6addrctl.8.gz
OLD_FILES+=usr/share/man/man8/mld6query.8.gz
OLD_FILES+=usr/share/man/man8/ndp.8.gz
OLD_FILES+=usr/share/man/man8/ping6.8.gz
OLD_FILES+=usr/share/man/man8/rip6query.8.gz
OLD_FILES+=usr/share/man/man8/route6d.8.gz
OLD_FILES+=usr/share/man/man8/rrenumd.8.gz
OLD_FILES+=usr/share/man/man8/rtadvctl.8.gz
OLD_FILES+=usr/share/man/man8/rtadvd.8.gz
OLD_FILES+=usr/share/man/man8/rtsol.8.gz
OLD_FILES+=usr/share/man/man8/rtsold.8.gz
OLD_FILES+=usr/share/man/man8/traceroute6.8.gz
.endif
.if ${MK_INET6_SUPPORT} == no
OLD_FILES+=rescue/ping6
OLD_FILES+=rescue/rtsol
.endif
.if ${MK_INETD} == no
OLD_FILES+=etc/rc.d/inetd
OLD_FILES+=usr/sbin/inetd
OLD_FILES+=usr/share/man/man5/inetd.conf.5.gz
OLD_FILES+=usr/share/man/man8/inetd.8.gz
.endif
.if ${MK_IPFILTER} == no
OLD_FILES+=etc/periodic/security/510.ipfdenied
OLD_FILES+=etc/periodic/security/610.ipf6denied
OLD_FILES+=rescue/ipf
OLD_FILES+=sbin/ipf
OLD_FILES+=sbin/ipfs
OLD_FILES+=sbin/ipfstat
OLD_FILES+=sbin/ipftest
OLD_FILES+=sbin/ipmon
OLD_FILES+=sbin/ipnat
OLD_FILES+=sbin/ippool
OLD_FILES+=sbin/ipresend
OLD_FILES+=usr/include/netinet/ip_auth.h
OLD_FILES+=usr/include/netinet/ip_compat.h
OLD_FILES+=usr/include/netinet/ip_fil.h
OLD_FILES+=usr/include/netinet/ip_frag.h
OLD_FILES+=usr/include/netinet/ip_htable.h
OLD_FILES+=usr/include/netinet/ip_lookup.h
OLD_FILES+=usr/include/netinet/ip_nat.h
OLD_FILES+=usr/include/netinet/ip_pool.h
OLD_FILES+=usr/include/netinet/ip_proxy.h
OLD_FILES+=usr/include/netinet/ip_rules.h
OLD_FILES+=usr/include/netinet/ip_scan.h
OLD_FILES+=usr/include/netinet/ip_state.h
OLD_FILES+=usr/include/netinet/ip_sync.h
OLD_FILES+=usr/include/netinet/ipl.h
OLD_FILES+=usr/share/examples/ipfilter/README
OLD_FILES+=usr/share/examples/ipfilter/BASIC.NAT
OLD_FILES+=usr/share/examples/ipfilter/BASIC_1.FW
OLD_FILES+=usr/share/examples/ipfilter/BASIC_2.FW
OLD_FILES+=usr/share/examples/ipfilter/example.1
OLD_FILES+=usr/share/examples/ipfilter/example.2
OLD_FILES+=usr/share/examples/ipfilter/example.3
OLD_FILES+=usr/share/examples/ipfilter/example.4
OLD_FILES+=usr/share/examples/ipfilter/example.5
OLD_FILES+=usr/share/examples/ipfilter/example.6
OLD_FILES+=usr/share/examples/ipfilter/example.7
OLD_FILES+=usr/share/examples/ipfilter/example.8
OLD_FILES+=usr/share/examples/ipfilter/example.9
OLD_FILES+=usr/share/examples/ipfilter/example.10
OLD_FILES+=usr/share/examples/ipfilter/example.11
OLD_FILES+=usr/share/examples/ipfilter/example.12
OLD_FILES+=usr/share/examples/ipfilter/example.13
OLD_FILES+=usr/share/examples/ipfilter/example.sr
OLD_FILES+=usr/share/examples/ipfilter/firewall
OLD_FILES+=usr/share/examples/ipfilter/ftp-proxy
OLD_FILES+=usr/share/examples/ipfilter/ftppxy
OLD_FILES+=usr/share/examples/ipfilter/nat-setup
OLD_FILES+=usr/share/examples/ipfilter/nat.eg
OLD_FILES+=usr/share/examples/ipfilter/server
OLD_FILES+=usr/share/examples/ipfilter/tcpstate
OLD_FILES+=usr/share/examples/ipfilter/example.14
OLD_FILES+=usr/share/examples/ipfilter/firewall.1
OLD_FILES+=usr/share/examples/ipfilter/firewall.2
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.permissive
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.restrictive
OLD_FILES+=usr/share/examples/ipfilter/ipf.conf.sample
OLD_FILES+=usr/share/examples/ipfilter/ipnat.conf.sample
OLD_FILES+=usr/share/examples/ipfilter/ipf-howto.txt
OLD_FILES+=usr/share/examples/ipfilter/examples.txt
OLD_FILES+=usr/share/examples/ipfilter/rules.txt
OLD_FILES+=usr/share/examples/ipfilter/mkfilters
OLD_DIRS+=usr/share/examples/ipfilter
OLD_FILES+=usr/share/man/man1/ipftest.1.gz
OLD_FILES+=usr/share/man/man1/ipresend.1.gz
OLD_FILES+=usr/share/man/man4/ipf.4.gz
OLD_FILES+=usr/share/man/man4/ipl.4.gz
OLD_FILES+=usr/share/man/man4/ipfilter.4.gz
OLD_FILES+=usr/share/man/man4/ipnat.4.gz
OLD_FILES+=usr/share/man/man5/ipf.5.gz
OLD_FILES+=usr/share/man/man5/ipf.conf.5.gz
OLD_FILES+=usr/share/man/man5/ipf6.conf.5.gz
OLD_FILES+=usr/share/man/man5/ipnat.5.gz
OLD_FILES+=usr/share/man/man5/ipnat.conf.5.gz
OLD_FILES+=usr/share/man/man5/ippool.5.gz
OLD_FILES+=usr/share/man/man8/ipf.8.gz
OLD_FILES+=usr/share/man/man8/ipfs.8.gz
OLD_FILES+=usr/share/man/man8/ipfstat.8.gz
OLD_FILES+=usr/share/man/man8/ipmon.8.gz
OLD_FILES+=usr/share/man/man8/ipnat.8.gz
OLD_FILES+=usr/share/man/man8/ippool.8.gz
.endif
.if ${MK_IPFW} == no
OLD_FILES+=etc/periodic/security/500.ipfwdenied
OLD_FILES+=etc/periodic/security/550.ipfwlimit
OLD_FILES+=sbin/ipfw
OLD_FILES+=sbin/natd
OLD_FILES+=usr/sbin/ipfwpcap
OLD_FILES+=usr/share/man/man8/ipfw.8.gz
OLD_FILES+=usr/share/man/man8/ipfwpcap.8.gz
OLD_FILES+=usr/share/man/man8/natd.8.gz
.endif
.if ${MK_ISCSI} == no
OLD_FILES+=etc/rc.d/iscsictl
OLD_FILES+=etc/rc.d/iscsid
OLD_FILES+=rescue/iscsictl
OLD_FILES+=rescue/iscsid
OLD_FILES+=sbin/iscontrol
OLD_FILES+=usr/bin/iscsictl
OLD_FILES+=usr/sbin/iscsid
OLD_FILES+=usr/share/man/man4/iscsi.4.gz
OLD_FILES+=usr/share/man/man4/iscsi_initiator.4.gz
OLD_FILES+=usr/share/man/man5/iscsi.conf.5.gz
OLD_FILES+=usr/share/man/man8/iscontrol.8.gz
OLD_FILES+=usr/share/man/man8/iscsictl.8.gz
OLD_FILES+=usr/share/man/man8/iscsid.8.gz
.endif
.if ${MK_JAIL} == no
OLD_FILES+=etc/rc.d/jail
OLD_FILES+=usr/sbin/jail
OLD_FILES+=usr/sbin/jexec
OLD_FILES+=usr/sbin/jls
OLD_FILES+=usr/share/man/man5/jail.conf.5.gz
OLD_FILES+=usr/share/man/man8/jail.8.gz
OLD_FILES+=usr/share/man/man8/jexec.8.gz
OLD_FILES+=usr/share/man/man8/jls.8.gz
.endif
.if ${MK_KDUMP} == no
OLD_FILES+=usr/bin/kdump
OLD_FILES+=usr/bin/truss
OLD_FILES+=usr/share/man/man1/kdump.1.gz
OLD_FILES+=usr/share/man/man1/truss.1.gz
.endif
.if ${MK_KERBEROS} == no
OLD_FILES+=etc/rc.d/ipropd_master
OLD_FILES+=etc/rc.d/ipropd_slave
OLD_FILES+=usr/bin/compile_et
OLD_FILES+=usr/bin/hxtool
OLD_FILES+=usr/bin/kadmin
OLD_FILES+=usr/bin/kdestroy
OLD_FILES+=usr/bin/kf
OLD_FILES+=usr/bin/kgetcred
OLD_FILES+=usr/bin/kinit
OLD_FILES+=usr/bin/klist
OLD_FILES+=usr/bin/kpasswd
OLD_FILES+=usr/bin/krb5-config
OLD_FILES+=usr/bin/ksu
OLD_FILES+=usr/bin/kswitch
OLD_FILES+=usr/bin/string2key
OLD_FILES+=usr/bin/verify_krb5_conf
OLD_FILES+=usr/include/asn1-common.h
OLD_FILES+=usr/include/asn1_err.h
OLD_FILES+=usr/include/base64.h
OLD_FILES+=usr/include/cms_asn1.h
OLD_FILES+=usr/include/crmf_asn1.h
OLD_FILES+=usr/include/der-private.h
OLD_FILES+=usr/include/der-protos.h
OLD_FILES+=usr/include/der.h
OLD_FILES+=usr/include/digest_asn1.h
OLD_FILES+=usr/include/getarg.h
OLD_FILES+=usr/include/gssapi/gssapi_krb5.h
OLD_FILES+=usr/include/hdb-protos.h
OLD_FILES+=usr/include/hdb.h
OLD_FILES+=usr/include/hdb_asn1.h
OLD_FILES+=usr/include/hdb_err.h
OLD_FILES+=usr/include/heim_asn1.h
OLD_FILES+=usr/include/heim_err.h
OLD_FILES+=usr/include/heim_threads.h
OLD_FILES+=usr/include/heimbase.h
OLD_FILES+=usr/include/heimntlm-protos.h
OLD_FILES+=usr/include/heimntlm.h
OLD_FILES+=usr/include/hex.h
OLD_FILES+=usr/include/hx509-private.h
OLD_FILES+=usr/include/hx509-protos.h
OLD_FILES+=usr/include/hx509.h
OLD_FILES+=usr/include/hx509_err.h
OLD_FILES+=usr/include/k524_err.h
OLD_FILES+=usr/include/kadm5/admin.h
OLD_FILES+=usr/include/kadm5/kadm5-private.h
OLD_FILES+=usr/include/kadm5/kadm5-protos.h
OLD_FILES+=usr/include/kadm5/kadm5-pwcheck.h
OLD_FILES+=usr/include/kadm5/kadm5_err.h
OLD_FILES+=usr/include/kadm5/private.h
OLD_DIRS+=usr/include/kadm5
OLD_FILES+=usr/include/kafs.h
OLD_FILES+=usr/include/kdc-protos.h
OLD_FILES+=usr/include/kdc.h
OLD_FILES+=usr/include/krb5-private.h
OLD_FILES+=usr/include/krb5-protos.h
OLD_FILES+=usr/include/krb5-types.h
OLD_FILES+=usr/include/krb5.h
OLD_FILES+=usr/include/krb5/ccache_plugin.h
OLD_FILES+=usr/include/krb5/locate_plugin.h
OLD_FILES+=usr/include/krb5/send_to_kdc_plugin.h
OLD_FILES+=usr/include/krb5/windc_plugin.h
OLD_DIRS+=usr/include/krb5
OLD_FILES+=usr/include/krb5_asn1.h
OLD_FILES+=usr/include/krb5_ccapi.h
OLD_FILES+=usr/include/krb5_err.h
OLD_FILES+=usr/include/kx509_asn1.h
OLD_FILES+=usr/include/ntlm_err.h
OLD_FILES+=usr/include/ocsp_asn1.h
OLD_FILES+=usr/include/parse_bytes.h
OLD_FILES+=usr/include/parse_time.h
OLD_FILES+=usr/include/parse_units.h
OLD_FILES+=usr/include/pkcs10_asn1.h
OLD_FILES+=usr/include/pkcs12_asn1.h
OLD_FILES+=usr/include/pkcs8_asn1.h
OLD_FILES+=usr/include/pkcs9_asn1.h
OLD_FILES+=usr/include/pkinit_asn1.h
OLD_FILES+=usr/include/resolve.h
OLD_FILES+=usr/include/rfc2459_asn1.h
OLD_FILES+=usr/include/roken-common.h
OLD_FILES+=usr/include/rtbl.h
OLD_FILES+=usr/include/wind.h
OLD_FILES+=usr/include/wind_err.h
OLD_FILES+=usr/include/xdbm.h
OLD_FILES+=usr/lib/libasn1.a
OLD_FILES+=usr/lib/libasn1.so
OLD_LIBS+=usr/lib/libasn1.so.11
OLD_FILES+=usr/lib/libasn1_p.a
OLD_FILES+=usr/lib/libcom_err.a
OLD_FILES+=usr/lib/libcom_err.so
OLD_LIBS+=usr/lib/libcom_err.so.5
OLD_FILES+=usr/lib/libcom_err_p.a
OLD_FILES+=usr/lib/libgssapi_krb5.a
OLD_FILES+=usr/lib/libgssapi_krb5.so
OLD_LIBS+=usr/lib/libgssapi_krb5.so.10
OLD_FILES+=usr/lib/libgssapi_krb5_p.a
OLD_FILES+=usr/lib/libgssapi_ntlm.a
OLD_FILES+=usr/lib/libgssapi_ntlm.so
OLD_LIBS+=usr/lib/libgssapi_ntlm.so.10
OLD_FILES+=usr/lib/libgssapi_ntlm_p.a
OLD_FILES+=usr/lib/libgssapi_spnego.a
OLD_FILES+=usr/lib/libgssapi_spnego.so
OLD_LIBS+=usr/lib/libgssapi_spnego.so.10
OLD_FILES+=usr/lib/libgssapi_spnego_p.a
OLD_FILES+=usr/lib/libhdb.a
OLD_FILES+=usr/lib/libhdb.so
OLD_LIBS+=usr/lib/libhdb.so.11
OLD_FILES+=usr/lib/libhdb_p.a
OLD_FILES+=usr/lib/libheimbase.a
OLD_FILES+=usr/lib/libheimbase.so
OLD_LIBS+=usr/lib/libheimbase.so.11
OLD_FILES+=usr/lib/libheimbase_p.a
OLD_FILES+=usr/lib/libheimntlm.a
OLD_FILES+=usr/lib/libheimntlm.so
OLD_LIBS+=usr/lib/libheimntlm.so.11
OLD_FILES+=usr/lib/libheimntlm_p.a
OLD_FILES+=usr/lib/libheimsqlite.a
OLD_FILES+=usr/lib/libheimsqlite.so
OLD_LIBS+=usr/lib/libheimsqlite.so.11
OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib/libhx509.a
OLD_FILES+=usr/lib/libhx509.so
OLD_LIBS+=usr/lib/libhx509.so.11
OLD_FILES+=usr/lib/libhx509_p.a
OLD_FILES+=usr/lib/libkadm5clnt.a
OLD_FILES+=usr/lib/libkadm5clnt.so
OLD_LIBS+=usr/lib/libkadm5clnt.so.11
OLD_FILES+=usr/lib/libkadm5clnt_p.a
OLD_FILES+=usr/lib/libkadm5srv.a
OLD_FILES+=usr/lib/libkadm5srv.so
OLD_LIBS+=usr/lib/libkadm5srv.so.11
OLD_FILES+=usr/lib/libkadm5srv_p.a
OLD_FILES+=usr/lib/libkafs5.a
OLD_FILES+=usr/lib/libkafs5.so
OLD_LIBS+=usr/lib/libkafs5.so.11
OLD_FILES+=usr/lib/libkafs5_p.a
OLD_FILES+=usr/lib/libkdc.a
OLD_FILES+=usr/lib/libkdc.so
OLD_LIBS+=usr/lib/libkdc.so.11
OLD_FILES+=usr/lib/libkdc_p.a
OLD_FILES+=usr/lib/libkrb5.a
OLD_FILES+=usr/lib/libkrb5.so
OLD_LIBS+=usr/lib/libkrb5.so.11
OLD_FILES+=usr/lib/libkrb5_p.a
OLD_FILES+=usr/lib/libroken.a
OLD_FILES+=usr/lib/libroken.so
OLD_LIBS+=usr/lib/libroken.so.11
OLD_FILES+=usr/lib/libroken_p.a
OLD_FILES+=usr/lib/libwind.a
OLD_FILES+=usr/lib/libwind.so
OLD_LIBS+=usr/lib/libwind.so.11
OLD_FILES+=usr/lib/libwind_p.a
OLD_FILES+=usr/lib/pam_krb5.so
OLD_LIBS+=usr/lib/pam_krb5.so.6
OLD_FILES+=usr/lib/pam_ksu.so
OLD_LIBS+=usr/lib/pam_ksu.so.6
OLD_FILES+=usr/lib/private/libheimipcc.a
OLD_FILES+=usr/lib/private/libheimipcc.so
OLD_LIBS+=usr/lib/private/libheimipcc.so.11
OLD_FILES+=usr/lib/private/libheimipcc_p.a
OLD_FILES+=usr/lib/private/libheimipcs.a
OLD_FILES+=usr/lib/private/libheimipcs.so
OLD_LIBS+=usr/lib/private/libheimipcs.so.11
OLD_FILES+=usr/lib/private/libheimipcs_p.a
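# The usr/lib32 compat entries below apply only on the 64-bit targets
# that install 32-bit compatibility libraries.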
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libasn1.a
OLD_FILES+=usr/lib32/libasn1.so
OLD_LIBS+=usr/lib32/libasn1.so.11
OLD_FILES+=usr/lib32/libasn1_p.a
OLD_FILES+=usr/lib32/libgssapi_krb5.a
OLD_FILES+=usr/lib32/libgssapi_krb5.so
OLD_LIBS+=usr/lib32/libgssapi_krb5.so.10
OLD_FILES+=usr/lib32/libgssapi_krb5_p.a
OLD_FILES+=usr/lib32/libgssapi_ntlm.a
OLD_FILES+=usr/lib32/libgssapi_ntlm.so
OLD_LIBS+=usr/lib32/libgssapi_ntlm.so.10
OLD_FILES+=usr/lib32/libgssapi_ntlm_p.a
OLD_FILES+=usr/lib32/libgssapi_spnego.a
OLD_FILES+=usr/lib32/libgssapi_spnego.so
OLD_LIBS+=usr/lib32/libgssapi_spnego.so.10
OLD_FILES+=usr/lib32/libgssapi_spnego_p.a
OLD_FILES+=usr/lib32/libhdb.a
OLD_FILES+=usr/lib32/libhdb.so
OLD_LIBS+=usr/lib32/libhdb.so.11
OLD_FILES+=usr/lib32/libhdb_p.a
OLD_FILES+=usr/lib32/libheimbase.a
OLD_FILES+=usr/lib32/libheimbase.so
OLD_LIBS+=usr/lib32/libheimbase.so.11
OLD_FILES+=usr/lib32/libheimbase_p.a
OLD_FILES+=usr/lib32/libheimntlm.a
OLD_FILES+=usr/lib32/libheimntlm.so
OLD_LIBS+=usr/lib32/libheimntlm.so.11
OLD_FILES+=usr/lib32/libheimntlm_p.a
OLD_FILES+=usr/lib32/libheimsqlite.a
OLD_FILES+=usr/lib32/libheimsqlite.so
OLD_LIBS+=usr/lib32/libheimsqlite.so.11
OLD_FILES+=usr/lib32/libheimsqlite_p.a
OLD_FILES+=usr/lib32/libhx509.a
OLD_FILES+=usr/lib32/libhx509.so
OLD_LIBS+=usr/lib32/libhx509.so.11
OLD_FILES+=usr/lib32/libhx509_p.a
OLD_FILES+=usr/lib32/libkadm5clnt.a
OLD_FILES+=usr/lib32/libkadm5clnt.so
OLD_LIBS+=usr/lib32/libkadm5clnt.so.11
OLD_FILES+=usr/lib32/libkadm5clnt_p.a
OLD_FILES+=usr/lib32/libkadm5srv.a
OLD_FILES+=usr/lib32/libkadm5srv.so
OLD_LIBS+=usr/lib32/libkadm5srv.so.11
OLD_FILES+=usr/lib32/libkadm5srv_p.a
OLD_FILES+=usr/lib32/libkafs5.a
OLD_FILES+=usr/lib32/libkafs5.so
OLD_LIBS+=usr/lib32/libkafs5.so.11
OLD_FILES+=usr/lib32/libkafs5_p.a
OLD_FILES+=usr/lib32/libkdc.a
OLD_FILES+=usr/lib32/libkdc.so
OLD_LIBS+=usr/lib32/libkdc.so.11
OLD_FILES+=usr/lib32/libkdc_p.a
OLD_FILES+=usr/lib32/libkrb5.a
OLD_FILES+=usr/lib32/libkrb5.so
OLD_LIBS+=usr/lib32/libkrb5.so.11
OLD_FILES+=usr/lib32/libkrb5_p.a
OLD_FILES+=usr/lib32/libroken.a
OLD_FILES+=usr/lib32/libroken.so
OLD_LIBS+=usr/lib32/libroken.so.11
OLD_FILES+=usr/lib32/libroken_p.a
OLD_FILES+=usr/lib32/libwind.a
OLD_FILES+=usr/lib32/libwind.so
OLD_LIBS+=usr/lib32/libwind.so.11
OLD_FILES+=usr/lib32/libwind_p.a
OLD_FILES+=usr/lib32/pam_krb5.so
OLD_LIBS+=usr/lib32/pam_krb5.so.6
OLD_FILES+=usr/lib32/pam_ksu.so
OLD_LIBS+=usr/lib32/pam_ksu.so.6
OLD_FILES+=usr/lib32/private/libheimipcc.a
OLD_FILES+=usr/lib32/private/libheimipcc.so
OLD_LIBS+=usr/lib32/private/libheimipcc.so.11
OLD_FILES+=usr/lib32/private/libheimipcc_p.a
OLD_FILES+=usr/lib32/private/libheimipcs.a
OLD_FILES+=usr/lib32/private/libheimipcs.so
OLD_LIBS+=usr/lib32/private/libheimipcs.so.11
OLD_FILES+=usr/lib32/private/libheimipcs_p.a
.endif
OLD_FILES+=usr/libexec/digest-service
OLD_FILES+=usr/libexec/hprop
OLD_FILES+=usr/libexec/hpropd
OLD_FILES+=usr/libexec/ipropd-master
OLD_FILES+=usr/libexec/ipropd-slave
OLD_FILES+=usr/libexec/kadmind
OLD_FILES+=usr/libexec/kcm
OLD_FILES+=usr/libexec/kdc
OLD_FILES+=usr/libexec/kdigest
OLD_FILES+=usr/libexec/kfd
OLD_FILES+=usr/libexec/kimpersonate
OLD_FILES+=usr/libexec/kpasswdd
OLD_FILES+=usr/sbin/kstash
OLD_FILES+=usr/sbin/ktutil
OLD_FILES+=usr/sbin/iprop-log
OLD_FILES+=usr/share/man/man1/kdestroy.1.gz
OLD_FILES+=usr/share/man/man1/kf.1.gz
OLD_FILES+=usr/share/man/man1/kinit.1.gz
OLD_FILES+=usr/share/man/man1/klist.1.gz
OLD_FILES+=usr/share/man/man1/kpasswd.1.gz
OLD_FILES+=usr/share/man/man1/krb5-config.1.gz
OLD_FILES+=usr/share/man/man1/kswitch.1.gz
OLD_FILES+=usr/share/man/man3/HDB.3.gz
OLD_FILES+=usr/share/man/man3/hdb__del.3.gz
OLD_FILES+=usr/share/man/man3/hdb__get.3.gz
OLD_FILES+=usr/share/man/man3/hdb__put.3.gz
OLD_FILES+=usr/share/man/man3/hdb_auth_status.3.gz
OLD_FILES+=usr/share/man/man3/hdb_check_constrained_delegation.3.gz
OLD_FILES+=usr/share/man/man3/hdb_check_pkinit_ms_upn_match.3.gz
OLD_FILES+=usr/share/man/man3/hdb_check_s4u2self.3.gz
OLD_FILES+=usr/share/man/man3/hdb_close.3.gz
OLD_FILES+=usr/share/man/man3/hdb_destroy.3.gz
OLD_FILES+=usr/share/man/man3/hdb_entry_ex.3.gz
OLD_FILES+=usr/share/man/man3/hdb_fetch_kvno.3.gz
OLD_FILES+=usr/share/man/man3/hdb_firstkey.3.gz
OLD_FILES+=usr/share/man/man3/hdb_free.3.gz
OLD_FILES+=usr/share/man/man3/hdb_get_realms.3.gz
OLD_FILES+=usr/share/man/man3/hdb_lock.3.gz
OLD_FILES+=usr/share/man/man3/hdb_name.3.gz
OLD_FILES+=usr/share/man/man3/hdb_nextkey.3.gz
OLD_FILES+=usr/share/man/man3/hdb_open.3.gz
OLD_FILES+=usr/share/man/man3/hdb_password.3.gz
OLD_FILES+=usr/share/man/man3/hdb_remove.3.gz
OLD_FILES+=usr/share/man/man3/hdb_rename.3.gz
OLD_FILES+=usr/share/man/man3/hdb_store.3.gz
OLD_FILES+=usr/share/man/man3/hdb_unlock.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm1_master.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm2_master.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_lm2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm1.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_decode_targetinfo.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_targetinfo.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type1.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type3.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_buf.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_targetinfo.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type1.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type2.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type3.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_keyex_unwrap.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_nt_key.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_ntlmv2_key.3.gz
OLD_FILES+=usr/share/man/man3/heim_ntlm_verify_ntlm2.3.gz
OLD_FILES+=usr/share/man/man3/hx509.3.gz
OLD_FILES+=usr/share/man/man3/hx509_bitstring_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_sign.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_sign_self.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_crl_dp_uri.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_eku.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_hostname.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_jid.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_ms_upn.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_otherName.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_pkinit.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_rfc822name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_ca.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_domaincontroller.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter_lifetime.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notBefore.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_proxy.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_serialnumber.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_spki.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_subject.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_template.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_unique.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_subject_expand.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_template_units.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_binary.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_check_eku.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_cmp.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_find_subjectAltName_otherName.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI_AlgorithmIdentifier.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_attribute.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_base_subject.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer_unique_id.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_notAfter.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_notBefore.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_serialnumber.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject_unique_id.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_init_data.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_keyusage_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_ref.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cert_set_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_add.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_append.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_end_seq.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_filter.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_find.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_info.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_iter_f.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_merge.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_next_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_start_seq.3.gz
OLD_FILES+=usr/share/man/man3/hx509_certs_store.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ci_print_names.3.gz
OLD_FILES+=usr/share/man/man3/hx509_clear_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_create_signed_1.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_envelope_1.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_unenvelope.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_unwrap_ContentInfo.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_verify_signed.3.gz
OLD_FILES+=usr/share/man/man3/hx509_cms_wrap_ContentInfo.3.gz
OLD_FILES+=usr/share/man/man3/hx509_context_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_context_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_context_set_missing_revoke.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_add_revoked_certs.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_alloc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_lifetime.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crl_sign.3.gz
OLD_FILES+=usr/share/man/man3/hx509_crypto.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_add.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_add_binding.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_find.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_find_binding.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_env_lfind.3.gz
OLD_FILES+=usr/share/man/man3/hx509_err.3.gz
OLD_FILES+=usr/share/man/man3/hx509_error.3.gz
OLD_FILES+=usr/share/man/man3/hx509_free_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_free_octet_string_list.3.gz
OLD_FILES+=usr/share/man/man3/hx509_general_name_unparse.3.gz
OLD_FILES+=usr/share/man/man3/hx509_get_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_get_one_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_keyset.3.gz
OLD_FILES+=usr/share/man/man3/hx509_lock.3.gz
OLD_FILES+=usr/share/man/man3/hx509_misc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_binary.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_cmp.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_copy.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_expand.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_is_null_p.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_to_Name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_name_to_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ocsp_request.3.gz
OLD_FILES+=usr/share/man/man3/hx509_ocsp_verify.3.gz
OLD_FILES+=usr/share/man/man3/hx509_oid_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_oid_sprint.3.gz
OLD_FILES+=usr/share/man/man3/hx509_parse_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_add_cms_alg.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_alloc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cms_algs.3.gz
OLD_FILES+=usr/share/man/man3/hx509_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_print_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_print_stdout.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_alloc.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_cmp_func.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_eku.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_issuer_serial.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_match_option.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_statistic_file.3.gz
OLD_FILES+=usr/share/man/man3/hx509_query_unparse_stats.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_add_crl.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_add_ocsp.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_ocsp_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_revoke_verify.3.gz
OLD_FILES+=usr/share/man/man3/hx509_set_error_string.3.gz
OLD_FILES+=usr/share/man/man3/hx509_set_error_stringv.3.gz
OLD_FILES+=usr/share/man/man3/hx509_unparse_der_name.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_cert.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_add_flags.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_free.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_init.3.gz
OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_set_print.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_attach_anchors.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_attach_revoke.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_ctx_f_allow_default_trustanchors.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_destroy_ctx.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_hostname.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_init_ctx.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_path.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_max_depth.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_proxy_certificate.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_strict_rfc3280_verification.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_set_time.3.gz
OLD_FILES+=usr/share/man/man3/hx509_verify_signature.3.gz
OLD_FILES+=usr/share/man/man3/hx509_xfree.3.gz
OLD_FILES+=usr/share/man/man3/k_afs_cell_of_file.3.gz
OLD_FILES+=usr/share/man/man3/k_hasafs.3.gz
OLD_FILES+=usr/share/man/man3/k_pioctl.3.gz
OLD_FILES+=usr/share/man/man3/k_setpag.3.gz
OLD_FILES+=usr/share/man/man3/k_unlog.3.gz
OLD_FILES+=usr/share/man/man3/kadm5_pwcheck.3.gz
OLD_FILES+=usr/share/man/man3/kafs.3.gz
OLD_FILES+=usr/share/man/man3/kafs5.3.gz
OLD_FILES+=usr/share/man/man3/kafs_set_verbose.3.gz
OLD_FILES+=usr/share/man/man3/kafs_settoken.3.gz
OLD_FILES+=usr/share/man/man3/kafs_settoken5.3.gz
OLD_FILES+=usr/share/man/man3/kafs_settoken_rxkad.3.gz
OLD_FILES+=usr/share/man/man3/krb5.3.gz
OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc_ccache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal_ext.3.gz
OLD_FILES+=usr/share/man/man3/krb5_524_conv_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_acc_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_acl_match_file.3.gz
OLD_FILES+=usr/share/man/man3/krb5_acl_match_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_add_et_list.3.gz
OLD_FILES+=usr/share/man/man3/krb5_add_extra_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_add_ignore_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_addlog_dest.3.gz
OLD_FILES+=usr/share/man/man3/krb5_addlog_func.3.gz
OLD_FILES+=usr/share/man/man3/krb5_addr2sockaddr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_order.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_prefixlen_boundary.3.gz
OLD_FILES+=usr/share/man/man3/krb5_address_search.3.gz
OLD_FILES+=usr/share/man/man3/krb5_afslog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_afslog_uid.3.gz
OLD_FILES+=usr/share/man/man3/krb5_allow_weak_crypto.3.gz
OLD_FILES+=usr/share/man/man3/krb5_aname_to_localname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_anyaddr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault_boolean.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_appdefault_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_append_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_genaddrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getaddrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getflags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getlocalsubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getrcache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getremotesubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_getuserkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_initivector.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs_from_fd.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setflags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setivector.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setlocalsubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setrcache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setremotesubkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_con_setuserkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getauthenticator.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getcksumtype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getkeytype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getlocalseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_getremoteseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setcksumtype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setkeytype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setlocalseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_auth_setremoteseqnumber.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal_ext.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal_va.3.gz
OLD_FILES+=usr/share/man/man3/krb5_build_principal_va_ext.3.gz
OLD_FILES+=usr/share/man/man3/krb5_c_enctype_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_c_make_checksum.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_end_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_get_first.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_match.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_cache_next.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_clear_mcred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_close.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_copy_cache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_copy_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_copy_match_f.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_default_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_destroy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_end_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_gen_new.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_config.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_full_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_kdc_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_lifetime.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_prefix_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_get_version.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_initialize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_last_change_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_move.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_new_unique.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_next_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_register.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_remove_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_resolve.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_retrieve_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_config.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_default_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_friendly_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_set_kdc_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_start_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_store_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_support_switch.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cc_switch.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ccache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ccache_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_new.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_next.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cccol_last_change_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_change_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_check_transited.3.gz
OLD_FILES+=usr/share/man/man3/krb5_checksum_is_collision_proof.3.gz
OLD_FILES+=usr/share/man/man3/krb5_checksum_is_keyed.3.gz
OLD_FILES+=usr/share/man/man3/krb5_checksumsize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_cksumtype_to_enctype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_clear_error_message.3.gz
OLD_FILES+=usr/share/man/man3/krb5_clear_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_closelog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_compare_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_file_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_free_strings.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_bool.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_bool_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_list.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_string_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_strings.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_get_time_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_parse_file_multi.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_parse_string_multi.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_list.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_string_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_strings.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_config_vget_time_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_creds_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_host_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_copy_ticket.3.gz
OLD_FILES+=usr/share/man/man3/krb5_create_checksum.3.gz
OLD_FILES+=usr/share/man/man3/krb5_create_checksum_iov.3.gz
OLD_FILES+=usr/share/man/man3/krb5_credential.3.gz
OLD_FILES+=usr/share/man/man3/krb5_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_creds_get_ticket_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_destroy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_fx_cf2.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getblocksize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getconfoundersize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getenctype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_getpadsize.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_crypto_iov.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_cmp.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_copy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_ct_cmp.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_realloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_data_zero.3.gz
OLD_FILES+=usr/share/man/man3/krb5_decrypt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_decrypt_EncryptedData.3.gz
OLD_FILES+=usr/share/man/man3/krb5_decrypt_iov_ivec.3.gz
OLD_FILES+=usr/share/man/man3/krb5_deprecated.3.gz
OLD_FILES+=usr/share/man/man3/krb5_digest.3.gz
OLD_FILES+=usr/share/man/man3/krb5_digest_probe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_eai_to_heim_errno.3.gz
OLD_FILES+=usr/share/man/man3/krb5_encrypt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_encrypt_EncryptedData.3.gz
OLD_FILES+=usr/share/man/man3/krb5_encrypt_iov_ivec.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctype_disable.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctype_enable.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctype_valid.3.gz
OLD_FILES+=usr/share/man/man3/krb5_enctypes_compatible_keys.3.gz
OLD_FILES+=usr/share/man/man3/krb5_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_expand_hostname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_expand_hostname_realms.3.gz
OLD_FILES+=usr/share/man/man3/krb5_fcc_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_fileformats.3.gz
OLD_FILES+=usr/share/man/man3/krb5_find_padata.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_config_files.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_cred_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_creds_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_data_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_host_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_keyblock_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_krbhst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_ticket.3.gz
OLD_FILES+=usr/share/man/man3/krb5_free_unparsed_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_fwd_tgt_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_generate_random_block.3.gz
OLD_FILES+=usr/share/man/man3/krb5_generate_subkey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_generate_subkey_extended.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_all_client_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_all_server_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc_opt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_credentials.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_config_files.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_in_tkt_etypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_default_realms.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_dns_canonicalize_hostname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_extra_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_fcache_version.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_forwarded_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_host_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_ignore_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_cred.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_skey.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_get_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_kdc_sec_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krb524hst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krb_admin_hst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krb_changepw_hst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_krbhst.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_max_time_skew.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_use_admin_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_get_validated_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_getportbyname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_h_addr2addr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_h_addr2sockaddr.3.gz
OLD_FILES+=usr/share/man/man3/krb5_h_errno_to_heim_errno.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_context.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_get_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_service.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_creds_step.3.gz
OLD_FILES+=usr/share/man/man3/krb5_init_ets.3.gz
OLD_FILES+=usr/share/man/man3/krb5_initlog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_introduction.3.gz
OLD_FILES+=usr/share/man/man3/krb5_is_config_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_is_thread_safe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kerberos_enctypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keyblock_get_enctype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keyblock_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keyblock_zero.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytab_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytab_key_proc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_keytype_to_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_format_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_get_addrinfo.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_next.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_next_as_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_krbhst_reset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_add_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_close.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_copy_entry_contents.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_default_modify_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_default_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_destroy.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_end_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_free_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_full_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_get_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_have_content.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_next_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_read_service_key.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_register.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_remove_entry.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_resolve.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kt_start_seq_get.3.gz
OLD_FILES+=usr/share/man/man3/krb5_kuserok.3.gz
OLD_FILES+=usr/share/man/man3/krb5_log.3.gz
OLD_FILES+=usr/share/man/man3/krb5_log_msg.3.gz
OLD_FILES+=usr/share/man/man3/krb5_make_addrport.3.gz
OLD_FILES+=usr/share/man/man3/krb5_make_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_max_sockaddr_size.3.gz
OLD_FILES+=usr/share/man/man3/krb5_mcc_ops.3.gz
OLD_FILES+=usr/share/man/man3/krb5_mk_req.3.gz
OLD_FILES+=usr/share/man/man3/krb5_mk_safe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_openlog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_pac.3.gz
OLD_FILES+=usr/share/man/man3/krb5_pac_get_buffer.3.gz
OLD_FILES+=usr/share/man/man3/krb5_pac_verify.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_name_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_parse_nametype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_password_key_proc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_plugin_register.3.gz
OLD_FILES+=usr/share/man/man3/krb5_prepend_config_files_default.3.gz
OLD_FILES+=usr/share/man/man3/krb5_princ_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_princ_set_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_compare_any_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_comp_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_num_comp.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_get_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_intro.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_is_krbtgt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_match.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_set_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_principal_set_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_print_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_random_to_key.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rcache.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_error.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_ctx.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_ctx_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_pac_check.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_ctx_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_get_server.3.gz
OLD_FILES+=usr/share/man/man3/krb5_rd_safe.3.gz
OLD_FILES+=usr/share/man/man3/krb5_realm_compare.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_authdata.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_creds_tag.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_int16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_int32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_int8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_stringz.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_times.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_uint16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_uint32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ret_uint8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_config_files.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_default_in_tkt_etypes.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_default_realm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_dns_canonicalize_hostname.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_error_message.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_extra_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_fcache_version.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_home_dir_access.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_ignore_addresses.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_kdc_sec_offset.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_max_time_skew.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_password.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_real_time.3.gz
OLD_FILES+=usr/share/man/man3/krb5_set_use_admin_kdc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sname_to_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sock_to_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sockaddr2address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sockaddr2port.3.gz
OLD_FILES+=usr/share/man/man3/krb5_sockaddr_uninteresting.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_clear_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_emem.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_free.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_fd.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_mem.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_from_readonly_mem.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_get_byteorder.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_get_eof_code.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_is_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_read.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_seek.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_byteorder.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_eof_code.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_set_max_alloc.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_to_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_truncate.3.gz
OLD_FILES+=usr/share/man/man3/krb5_storage_write.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_address.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_addrs.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_authdata.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_creds_tag.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_data.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_int16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_int32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_int8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_keyblock.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_principal.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_stringz.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_times.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_uint16.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_uint32.3.gz
OLD_FILES+=usr/share/man/man3/krb5_store_uint8.3.gz
OLD_FILES+=usr/share/man/man3/krb5_string_to_key.3.gz
OLD_FILES+=usr/share/man/man3/krb5_string_to_keytype.3.gz
OLD_FILES+=usr/share/man/man3/krb5_support.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_authorization_data_type.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_client.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_endtime.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_ticket_get_server.3.gz
OLD_FILES+=usr/share/man/man3/krb5_timeofday.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_short.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_unparse_name_short.3.gz
OLD_FILES+=usr/share/man/man3/krb5_us_timeofday.3.gz
OLD_FILES+=usr/share/man/man3/krb5_v4compat.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_checksum.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_checksum_iov.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_init_creds.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_init.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_keytab.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_secure.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_service.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_user.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_user_lrealm.3.gz
OLD_FILES+=usr/share/man/man3/krb5_verify_user_opt.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vlog.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vlog_msg.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vset_error_string.3.gz
OLD_FILES+=usr/share/man/man3/krb5_vwarn.3.gz
OLD_FILES+=usr/share/man/man3/krb_afslog.3.gz
OLD_FILES+=usr/share/man/man3/krb_afslog_uid.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_buf.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_core.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type1.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type2.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type3.3.gz
OLD_FILES+=usr/share/man/man5/krb5.conf.5.gz
OLD_FILES+=usr/share/man/man8/hprop.8.gz
OLD_FILES+=usr/share/man/man8/hpropd.8.gz
OLD_FILES+=usr/share/man/man8/iprop-log.8.gz
OLD_FILES+=usr/share/man/man8/iprop.8.gz
OLD_FILES+=usr/share/man/man8/kadmin.8.gz
OLD_FILES+=usr/share/man/man8/kadmind.8.gz
OLD_FILES+=usr/share/man/man8/kcm.8.gz
OLD_FILES+=usr/share/man/man8/kdc.8.gz
OLD_FILES+=usr/share/man/man8/kdigest.8.gz
OLD_FILES+=usr/share/man/man8/kerberos.8.gz
OLD_FILES+=usr/share/man/man8/kimpersonate.8.gz
OLD_FILES+=usr/share/man/man8/kpasswdd.8.gz
OLD_FILES+=usr/share/man/man8/kstash.8.gz
OLD_FILES+=usr/share/man/man8/ktutil.8.gz
OLD_FILES+=usr/share/man/man8/pam_krb5.8.gz
OLD_FILES+=usr/share/man/man8/pam_ksu.8.gz
OLD_FILES+=usr/share/man/man8/string2key.8.gz
OLD_FILES+=usr/share/man/man8/verify_krb5_conf.8.gz
.endif
.if ${MK_KERBEROS_SUPPORT} == no
OLD_FILES+=usr/bin/compile_et
OLD_FILES+=usr/include/com_err.h
OLD_FILES+=usr/include/com_right.h
OLD_FILES+=usr/lib/libcom_err.a
OLD_FILES+=usr/lib/libcom_err.so
OLD_LIBS+=usr/lib/libcom_err.so.5
OLD_FILES+=usr/lib/libcom_err_p.a
OLD_FILES+=usr/lib32/libcom_err.a
OLD_FILES+=usr/lib32/libcom_err.so
OLD_LIBS+=usr/lib32/libcom_err.so.5
OLD_FILES+=usr/lib32/libcom_err_p.a
OLD_FILES+=usr/share/man/man1/compile_et.1.gz
OLD_FILES+=usr/share/man/man3/com_err.3.gz
.endif
.if ${MK_LDNS} == no
OLD_FILES+=usr/lib/private/libldns.a
OLD_FILES+=usr/lib/private/libldns.so
OLD_LIBS+=usr/lib/private/libldns.so.5
OLD_FILES+=usr/lib/private/libldns_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/private/libldns.a
OLD_FILES+=usr/lib32/private/libldns.so
OLD_LIBS+=usr/lib32/private/libldns.so.5
OLD_FILES+=usr/lib32/private/libldns_p.a
.endif
.endif
.if ${MK_LDNS_UTILS} == no
OLD_FILES+=usr/bin/drill
OLD_FILES+=usr/share/man/man1/drill.1.gz
OLD_FILES+=usr/bin/host
OLD_FILES+=usr/share/man/man1/host.1.gz
.endif
.if ${MK_LEGACY_CONSOLE} == no
OLD_FILES+=usr/sbin/kbdcontrol
OLD_FILES+=usr/sbin/kbdmap
OLD_FILES+=usr/sbin/moused
OLD_FILES+=usr/sbin/vidcontrol
OLD_FILES+=usr/sbin/vidfont
OLD_FILES+=usr/share/man/man1/kbdcontrol.1.gz
OLD_FILES+=usr/share/man/man1/kbdmap.1.gz
OLD_FILES+=usr/share/man/man1/vidcontrol.1.gz
OLD_FILES+=usr/share/man/man1/vidfont.1.gz
OLD_FILES+=usr/share/man/man5/kbdmap.5.gz
OLD_FILES+=usr/share/man/man5/keymap.5.gz
OLD_FILES+=usr/share/man/man8/moused.8.gz
.endif
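# With MK_LIB32 disabled, the contents of /usr/lib32 cannot be listed
# statically, so the block below builds the lists at parse time using
# the != (shell command) assignment: find(1) enumerates the installed
# tree and sed(1) strips the ${DESTDIR} prefix so entries match the
# relative-path convention used throughout this file. Shared libraries
# (lib*.so*) are routed to OLD_LIBS and everything else to OLD_FILES;
# the trailing "; echo" apparently just guarantees a successful exit
# status and a trailing newline. As an illustration (libfoo.a is a
# made-up name):
#   find ${DESTDIR}/usr/lib32 \! -type d \! -name "lib*.so*" \
#       | sed -e 's,^${DESTDIR}/,,'
# turns ${DESTDIR}/usr/lib32/libfoo.a into usr/lib32/libfoo.a. The same
# technique is repeated for the matching debug files under
# usr/lib/debug/usr/lib32 when MK_DEBUG_FILES is also "no".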
.if ${MK_LIB32} == no
OLD_FILES+=etc/mtree/BSD.lib32.dist
OLD_FILES+=libexec/ld-elf32.so.1
. if exists(${DESTDIR}/usr/lib32)
LIB32_DIRS!=find ${DESTDIR}/usr/lib32 -type d \
	| sed -e 's,^${DESTDIR}/,,'; echo
LIB32_FILES!=find ${DESTDIR}/usr/lib32 \! -type d \
	\! -name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
LIB32_LIBS!=find ${DESTDIR}/usr/lib32 \! -type d \
	-name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${LIB32_DIRS}
OLD_FILES+=${LIB32_FILES}
OLD_LIBS+=${LIB32_LIBS}
. endif
. if ${MK_DEBUG_FILES} == no
. if exists(${DESTDIR}/usr/lib/debug/usr/lib32)
DEBUG_LIB32_DIRS!=find ${DESTDIR}/usr/lib/debug/usr/lib32 -type d \
	| sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_LIB32_FILES!=find ${DESTDIR}/usr/lib/debug/usr/lib32 \! -type d \
	\! -name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
DEBUG_LIB32_LIBS!=find ${DESTDIR}/usr/lib/debug/usr/lib32 \! -type d \
	-name "lib*.so*" | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${DEBUG_LIB32_DIRS}
OLD_FILES+=${DEBUG_LIB32_FILES}
OLD_LIBS+=${DEBUG_LIB32_LIBS}
. endif
. endif
.endif
.if ${MK_LIBCPLUSPLUS} == no
OLD_LIBS+=lib/libcxxrt.so.1
OLD_FILES+=usr/lib/libc++.a
OLD_FILES+=usr/lib/libc++_p.a
OLD_FILES+=usr/lib/libc++experimental.a
OLD_FILES+=usr/lib/libc++fs.a
OLD_FILES+=usr/lib/libc++.so
OLD_LIBS+=usr/lib/libc++.so.1
OLD_FILES+=usr/lib/libcxxrt.a
OLD_FILES+=usr/lib/libcxxrt.so
OLD_FILES+=usr/lib/libcxxrt_p.a
OLD_FILES+=usr/include/c++/v1/__bit_reference
OLD_FILES+=usr/include/c++/v1/__bsd_locale_defaults.h
OLD_FILES+=usr/include/c++/v1/__bsd_locale_fallbacks.h
OLD_FILES+=usr/include/c++/v1/__config
OLD_FILES+=usr/include/c++/v1/__debug
OLD_FILES+=usr/include/c++/v1/__errc
OLD_FILES+=usr/include/c++/v1/__functional_03
OLD_FILES+=usr/include/c++/v1/__functional_base
OLD_FILES+=usr/include/c++/v1/__functional_base_03
OLD_FILES+=usr/include/c++/v1/__hash_table
OLD_FILES+=usr/include/c++/v1/__libcpp_version
OLD_FILES+=usr/include/c++/v1/__locale
OLD_FILES+=usr/include/c++/v1/__mutex_base
OLD_FILES+=usr/include/c++/v1/__node_handle
OLD_FILES+=usr/include/c++/v1/__nullptr
OLD_FILES+=usr/include/c++/v1/__split_buffer
OLD_FILES+=usr/include/c++/v1/__sso_allocator
OLD_FILES+=usr/include/c++/v1/__std_stream
OLD_FILES+=usr/include/c++/v1/__string
OLD_FILES+=usr/include/c++/v1/__threading_support
OLD_FILES+=usr/include/c++/v1/__tree
OLD_FILES+=usr/include/c++/v1/__tuple
OLD_FILES+=usr/include/c++/v1/__undef_macros
OLD_FILES+=usr/include/c++/v1/algorithm
OLD_FILES+=usr/include/c++/v1/any
OLD_FILES+=usr/include/c++/v1/array
OLD_FILES+=usr/include/c++/v1/atomic
OLD_FILES+=usr/include/c++/v1/bitset
OLD_FILES+=usr/include/c++/v1/cassert
OLD_FILES+=usr/include/c++/v1/ccomplex
OLD_FILES+=usr/include/c++/v1/cctype
OLD_FILES+=usr/include/c++/v1/cerrno
OLD_FILES+=usr/include/c++/v1/cfenv
OLD_FILES+=usr/include/c++/v1/cfloat
OLD_FILES+=usr/include/c++/v1/charconv
OLD_FILES+=usr/include/c++/v1/chrono
OLD_FILES+=usr/include/c++/v1/cinttypes
OLD_FILES+=usr/include/c++/v1/ciso646
OLD_FILES+=usr/include/c++/v1/climits
OLD_FILES+=usr/include/c++/v1/clocale
OLD_FILES+=usr/include/c++/v1/cmath
OLD_FILES+=usr/include/c++/v1/codecvt
OLD_FILES+=usr/include/c++/v1/compare
OLD_FILES+=usr/include/c++/v1/complex
OLD_FILES+=usr/include/c++/v1/complex.h
OLD_FILES+=usr/include/c++/v1/condition_variable
OLD_FILES+=usr/include/c++/v1/csetjmp
OLD_FILES+=usr/include/c++/v1/csignal
OLD_FILES+=usr/include/c++/v1/cstdarg
OLD_FILES+=usr/include/c++/v1/cstdbool
OLD_FILES+=usr/include/c++/v1/cstddef
OLD_FILES+=usr/include/c++/v1/cstdint
OLD_FILES+=usr/include/c++/v1/cstdio
OLD_FILES+=usr/include/c++/v1/cstdlib
OLD_FILES+=usr/include/c++/v1/cstring
OLD_FILES+=usr/include/c++/v1/ctgmath
OLD_FILES+=usr/include/c++/v1/ctime
OLD_FILES+=usr/include/c++/v1/ctype.h
OLD_FILES+=usr/include/c++/v1/cwchar
OLD_FILES+=usr/include/c++/v1/cwctype
OLD_FILES+=usr/include/c++/v1/cxxabi.h
OLD_FILES+=usr/include/c++/v1/deque
OLD_FILES+=usr/include/c++/v1/errno.h
OLD_FILES+=usr/include/c++/v1/exception
OLD_FILES+=usr/include/c++/v1/experimental/__config
OLD_FILES+=usr/include/c++/v1/experimental/__memory
OLD_FILES+=usr/include/c++/v1/experimental/algorithm
OLD_FILES+=usr/include/c++/v1/experimental/any
OLD_FILES+=usr/include/c++/v1/experimental/chrono
OLD_FILES+=usr/include/c++/v1/experimental/coroutine
OLD_FILES+=usr/include/c++/v1/experimental/deque
OLD_FILES+=usr/include/c++/v1/experimental/dynarray
OLD_FILES+=usr/include/c++/v1/experimental/filesystem
OLD_FILES+=usr/include/c++/v1/experimental/forward_list
OLD_FILES+=usr/include/c++/v1/experimental/functional
OLD_FILES+=usr/include/c++/v1/experimental/iterator
OLD_FILES+=usr/include/c++/v1/experimental/list
OLD_FILES+=usr/include/c++/v1/experimental/map
OLD_FILES+=usr/include/c++/v1/experimental/memory_resource
OLD_FILES+=usr/include/c++/v1/experimental/numeric
OLD_FILES+=usr/include/c++/v1/experimental/optional
OLD_FILES+=usr/include/c++/v1/experimental/propagate_const
OLD_FILES+=usr/include/c++/v1/experimental/ratio
OLD_FILES+=usr/include/c++/v1/experimental/regex
OLD_FILES+=usr/include/c++/v1/experimental/set
OLD_FILES+=usr/include/c++/v1/experimental/simd
OLD_FILES+=usr/include/c++/v1/experimental/string
OLD_FILES+=usr/include/c++/v1/experimental/string_view
OLD_FILES+=usr/include/c++/v1/experimental/system_error
OLD_FILES+=usr/include/c++/v1/experimental/tuple
OLD_FILES+=usr/include/c++/v1/experimental/type_traits
OLD_FILES+=usr/include/c++/v1/experimental/unordered_map
OLD_FILES+=usr/include/c++/v1/experimental/unordered_set
OLD_FILES+=usr/include/c++/v1/experimental/utility
OLD_FILES+=usr/include/c++/v1/experimental/vector
OLD_FILES+=usr/include/c++/v1/ext/__hash
OLD_FILES+=usr/include/c++/v1/ext/hash_map
OLD_FILES+=usr/include/c++/v1/ext/hash_set
OLD_FILES+=usr/include/c++/v1/filesystem
OLD_FILES+=usr/include/c++/v1/float.h
OLD_FILES+=usr/include/c++/v1/forward_list
OLD_FILES+=usr/include/c++/v1/fstream
OLD_FILES+=usr/include/c++/v1/functional
OLD_FILES+=usr/include/c++/v1/future
OLD_FILES+=usr/include/c++/v1/initializer_list
OLD_FILES+=usr/include/c++/v1/inttypes.h
OLD_FILES+=usr/include/c++/v1/iomanip
OLD_FILES+=usr/include/c++/v1/ios
OLD_FILES+=usr/include/c++/v1/iosfwd
OLD_FILES+=usr/include/c++/v1/iostream
OLD_FILES+=usr/include/c++/v1/istream
OLD_FILES+=usr/include/c++/v1/iterator
OLD_FILES+=usr/include/c++/v1/limits
OLD_FILES+=usr/include/c++/v1/limits.h
OLD_FILES+=usr/include/c++/v1/list
OLD_FILES+=usr/include/c++/v1/locale
OLD_FILES+=usr/include/c++/v1/locale.h
OLD_FILES+=usr/include/c++/v1/map
OLD_FILES+=usr/include/c++/v1/math.h
OLD_FILES+=usr/include/c++/v1/memory
OLD_FILES+=usr/include/c++/v1/mutex
OLD_FILES+=usr/include/c++/v1/new
OLD_FILES+=usr/include/c++/v1/numeric
OLD_FILES+=usr/include/c++/v1/optional
OLD_FILES+=usr/include/c++/v1/ostream
OLD_FILES+=usr/include/c++/v1/queue
OLD_FILES+=usr/include/c++/v1/random
OLD_FILES+=usr/include/c++/v1/ratio
OLD_FILES+=usr/include/c++/v1/regex
OLD_FILES+=usr/include/c++/v1/scoped_allocator
OLD_FILES+=usr/include/c++/v1/set
OLD_FILES+=usr/include/c++/v1/setjmp.h
OLD_FILES+=usr/include/c++/v1/shared_mutex
OLD_FILES+=usr/include/c++/v1/span
OLD_FILES+=usr/include/c++/v1/sstream
OLD_FILES+=usr/include/c++/v1/stack
OLD_FILES+=usr/include/c++/v1/stdbool.h
OLD_FILES+=usr/include/c++/v1/stddef.h
OLD_FILES+=usr/include/c++/v1/stdexcept
OLD_FILES+=usr/include/c++/v1/stdint.h
OLD_FILES+=usr/include/c++/v1/stdio.h
OLD_FILES+=usr/include/c++/v1/stdlib.h
OLD_FILES+=usr/include/c++/v1/streambuf
OLD_FILES+=usr/include/c++/v1/string
OLD_FILES+=usr/include/c++/v1/string.h
OLD_FILES+=usr/include/c++/v1/string_view
OLD_FILES+=usr/include/c++/v1/strstream
OLD_FILES+=usr/include/c++/v1/system_error
OLD_FILES+=usr/include/c++/v1/tgmath.h
OLD_FILES+=usr/include/c++/v1/thread
OLD_FILES+=usr/include/c++/v1/version
OLD_FILES+=usr/include/c++/v1/tr1/__bit_reference
OLD_FILES+=usr/include/c++/v1/tr1/__bsd_locale_defaults.h
OLD_FILES+=usr/include/c++/v1/tr1/__bsd_locale_fallbacks.h
OLD_FILES+=usr/include/c++/v1/tr1/__config
OLD_FILES+=usr/include/c++/v1/tr1/__debug
OLD_FILES+=usr/include/c++/v1/tr1/__functional_03
OLD_FILES+=usr/include/c++/v1/tr1/__functional_base
OLD_FILES+=usr/include/c++/v1/tr1/__functional_base_03
OLD_FILES+=usr/include/c++/v1/tr1/__hash_table
OLD_FILES+=usr/include/c++/v1/tr1/__libcpp_version
OLD_FILES+=usr/include/c++/v1/tr1/__locale
OLD_FILES+=usr/include/c++/v1/tr1/__mutex_base
OLD_FILES+=usr/include/c++/v1/tr1/__nullptr
OLD_FILES+=usr/include/c++/v1/tr1/__split_buffer
OLD_FILES+=usr/include/c++/v1/tr1/__sso_allocator
OLD_FILES+=usr/include/c++/v1/tr1/__std_stream
OLD_FILES+=usr/include/c++/v1/tr1/__string
OLD_FILES+=usr/include/c++/v1/tr1/__threading_support
OLD_FILES+=usr/include/c++/v1/tr1/__tree
OLD_FILES+=usr/include/c++/v1/tr1/__tuple
OLD_FILES+=usr/include/c++/v1/tr1/__undef_macros
OLD_FILES+=usr/include/c++/v1/tr1/algorithm
OLD_FILES+=usr/include/c++/v1/tr1/any
OLD_FILES+=usr/include/c++/v1/tr1/array
OLD_FILES+=usr/include/c++/v1/tr1/atomic
OLD_FILES+=usr/include/c++/v1/tr1/bitset
OLD_FILES+=usr/include/c++/v1/tr1/cassert
OLD_FILES+=usr/include/c++/v1/tr1/ccomplex
OLD_FILES+=usr/include/c++/v1/tr1/cctype
OLD_FILES+=usr/include/c++/v1/tr1/cerrno
OLD_FILES+=usr/include/c++/v1/tr1/cfenv
OLD_FILES+=usr/include/c++/v1/tr1/cfloat
OLD_FILES+=usr/include/c++/v1/tr1/chrono
OLD_FILES+=usr/include/c++/v1/tr1/cinttypes
OLD_FILES+=usr/include/c++/v1/tr1/ciso646
OLD_FILES+=usr/include/c++/v1/tr1/climits
OLD_FILES+=usr/include/c++/v1/tr1/clocale
OLD_FILES+=usr/include/c++/v1/tr1/cmath
OLD_FILES+=usr/include/c++/v1/tr1/codecvt
OLD_FILES+=usr/include/c++/v1/tr1/complex
OLD_FILES+=usr/include/c++/v1/tr1/complex.h
OLD_FILES+=usr/include/c++/v1/tr1/condition_variable
OLD_FILES+=usr/include/c++/v1/tr1/csetjmp
OLD_FILES+=usr/include/c++/v1/tr1/csignal
OLD_FILES+=usr/include/c++/v1/tr1/cstdarg
OLD_FILES+=usr/include/c++/v1/tr1/cstdbool
OLD_FILES+=usr/include/c++/v1/tr1/cstddef
OLD_FILES+=usr/include/c++/v1/tr1/cstdint
OLD_FILES+=usr/include/c++/v1/tr1/cstdio
OLD_FILES+=usr/include/c++/v1/tr1/cstdlib
OLD_FILES+=usr/include/c++/v1/tr1/cstring
OLD_FILES+=usr/include/c++/v1/tr1/ctgmath
OLD_FILES+=usr/include/c++/v1/tr1/ctime
OLD_FILES+=usr/include/c++/v1/tr1/ctype.h
OLD_FILES+=usr/include/c++/v1/tr1/cwchar
OLD_FILES+=usr/include/c++/v1/tr1/cwctype
OLD_FILES+=usr/include/c++/v1/tr1/deque
OLD_FILES+=usr/include/c++/v1/tr1/errno.h
OLD_FILES+=usr/include/c++/v1/tr1/exception
OLD_FILES+=usr/include/c++/v1/tr1/float.h
OLD_FILES+=usr/include/c++/v1/tr1/forward_list
OLD_FILES+=usr/include/c++/v1/tr1/fstream
OLD_FILES+=usr/include/c++/v1/tr1/functional
OLD_FILES+=usr/include/c++/v1/tr1/future
OLD_FILES+=usr/include/c++/v1/tr1/initializer_list
OLD_FILES+=usr/include/c++/v1/tr1/inttypes.h
OLD_FILES+=usr/include/c++/v1/tr1/iomanip
OLD_FILES+=usr/include/c++/v1/tr1/ios
OLD_FILES+=usr/include/c++/v1/tr1/iosfwd
OLD_FILES+=usr/include/c++/v1/tr1/iostream
OLD_FILES+=usr/include/c++/v1/tr1/istream
OLD_FILES+=usr/include/c++/v1/tr1/iterator
OLD_FILES+=usr/include/c++/v1/tr1/limits
OLD_FILES+=usr/include/c++/v1/tr1/limits.h
OLD_FILES+=usr/include/c++/v1/tr1/list
OLD_FILES+=usr/include/c++/v1/tr1/locale
OLD_FILES+=usr/include/c++/v1/tr1/locale.h
OLD_FILES+=usr/include/c++/v1/tr1/map
OLD_FILES+=usr/include/c++/v1/tr1/math.h
OLD_FILES+=usr/include/c++/v1/tr1/memory
OLD_FILES+=usr/include/c++/v1/tr1/mutex
OLD_FILES+=usr/include/c++/v1/tr1/new
OLD_FILES+=usr/include/c++/v1/tr1/numeric
OLD_FILES+=usr/include/c++/v1/tr1/optional
OLD_FILES+=usr/include/c++/v1/tr1/ostream
OLD_FILES+=usr/include/c++/v1/tr1/queue
OLD_FILES+=usr/include/c++/v1/tr1/random
OLD_FILES+=usr/include/c++/v1/tr1/ratio
OLD_FILES+=usr/include/c++/v1/tr1/regex
OLD_FILES+=usr/include/c++/v1/tr1/scoped_allocator
OLD_FILES+=usr/include/c++/v1/tr1/set
OLD_FILES+=usr/include/c++/v1/tr1/setjmp.h
OLD_FILES+=usr/include/c++/v1/tr1/shared_mutex
OLD_FILES+=usr/include/c++/v1/tr1/sstream
OLD_FILES+=usr/include/c++/v1/tr1/stack
OLD_FILES+=usr/include/c++/v1/tr1/stdbool.h
OLD_FILES+=usr/include/c++/v1/tr1/stddef.h
OLD_FILES+=usr/include/c++/v1/tr1/stdexcept
OLD_FILES+=usr/include/c++/v1/tr1/stdint.h
OLD_FILES+=usr/include/c++/v1/tr1/stdio.h
OLD_FILES+=usr/include/c++/v1/tr1/stdlib.h
OLD_FILES+=usr/include/c++/v1/tr1/streambuf
OLD_FILES+=usr/include/c++/v1/tr1/string
OLD_FILES+=usr/include/c++/v1/tr1/string.h
OLD_FILES+=usr/include/c++/v1/tr1/string_view
OLD_FILES+=usr/include/c++/v1/tr1/strstream
OLD_FILES+=usr/include/c++/v1/tr1/system_error
OLD_FILES+=usr/include/c++/v1/tr1/tgmath.h
OLD_FILES+=usr/include/c++/v1/tr1/thread
OLD_FILES+=usr/include/c++/v1/tr1/tuple
OLD_FILES+=usr/include/c++/v1/tr1/type_traits
OLD_FILES+=usr/include/c++/v1/tr1/typeindex
OLD_FILES+=usr/include/c++/v1/tr1/typeinfo
OLD_FILES+=usr/include/c++/v1/tr1/unordered_map
OLD_FILES+=usr/include/c++/v1/tr1/unordered_set
OLD_FILES+=usr/include/c++/v1/tr1/utility
OLD_FILES+=usr/include/c++/v1/tr1/valarray
OLD_FILES+=usr/include/c++/v1/tr1/variant
OLD_FILES+=usr/include/c++/v1/tr1/vector
OLD_FILES+=usr/include/c++/v1/tr1/wchar.h
OLD_FILES+=usr/include/c++/v1/tr1/wctype.h
OLD_FILES+=usr/include/c++/v1/tuple
OLD_FILES+=usr/include/c++/v1/type_traits
OLD_FILES+=usr/include/c++/v1/typeindex
OLD_FILES+=usr/include/c++/v1/typeinfo
OLD_FILES+=usr/include/c++/v1/unordered_map
OLD_FILES+=usr/include/c++/v1/unordered_set
OLD_FILES+=usr/include/c++/v1/unwind-arm.h
OLD_FILES+=usr/include/c++/v1/unwind-itanium.h
OLD_FILES+=usr/include/c++/v1/unwind.h
OLD_FILES+=usr/include/c++/v1/utility
OLD_FILES+=usr/include/c++/v1/valarray
OLD_FILES+=usr/include/c++/v1/variant
OLD_FILES+=usr/include/c++/v1/vector
OLD_FILES+=usr/include/c++/v1/version
OLD_FILES+=usr/include/c++/v1/wchar.h
OLD_FILES+=usr/include/c++/v1/wctype.h
OLD_FILES+=usr/lib32/libc++.a
OLD_FILES+=usr/lib32/libc++.so
OLD_LIBS+=usr/lib32/libc++.so.1
OLD_FILES+=usr/lib32/libc++_p.a
OLD_FILES+=usr/lib32/libc++experimental.a
OLD_FILES+=usr/lib32/libc++fs.a
OLD_FILES+=usr/lib32/libcxxrt.a
OLD_FILES+=usr/lib32/libcxxrt.so
OLD_LIBS+=usr/lib32/libcxxrt.so.1
OLD_FILES+=usr/lib32/libcxxrt_p.a
OLD_DIRS+=usr/include/c++/v1/tr1
OLD_DIRS+=usr/include/c++/v1/experimental
OLD_DIRS+=usr/include/c++/v1/ext
OLD_DIRS+=usr/include/c++/v1
.endif
.if ${MK_LIBTHR} == no
OLD_LIBS+=lib/libthr.so.3
OLD_FILES+=usr/lib/libthr.a
OLD_FILES+=usr/lib/libthr_p.a
OLD_FILES+=usr/share/man/man3/libthr.3.gz
.endif
.if ${MK_LLD} == no
OLD_FILES+=usr/bin/ld.lld
.endif
.if ${MK_LLDB} == no
OLD_FILES+=usr/bin/lldb
OLD_FILES+=usr/share/man/man1/lldb.1.gz
.endif
.if ${MK_LOCALES} == no
OLD_DIRS+=usr/share/locale
OLD_DIRS+=usr/share/locale/af_ZA.ISO8859-1
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/af_ZA.ISO8859-15
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/af_ZA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/af_ZA.UTF-8
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/af_ZA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/am_ET.UTF-8
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/am_ET.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_AE.UTF-8
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_AE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_EG.UTF-8
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_EG.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_JO.UTF-8
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_JO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_MA.UTF-8
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_MA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_QA.UTF-8
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_QA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ar_SA.UTF-8
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ar_SA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.CP1131
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.CP1131/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.CP1251
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.ISO8859-5
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/be_BY.UTF-8
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/be_BY.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/bg_BG.CP1251
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/bg_BG.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/bg_BG.UTF-8
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/bg_BG.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_AD.ISO8859-1
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_AD.ISO8859-15
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_AD.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_AD.UTF-8
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_AD.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_ES.ISO8859-1
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_ES.ISO8859-15
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_ES.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_ES.UTF-8
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_ES.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_FR.ISO8859-1
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_FR.ISO8859-15
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_FR.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_FR.UTF-8
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_FR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ca_IT.ISO8859-1
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/ca_IT.ISO8859-15
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_IT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/ca_IT.UTF-8
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ca_IT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/cs_CZ.ISO8859-2
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/cs_CZ.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/cs_CZ.UTF-8
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/cs_CZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/da_DK.ISO8859-1
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/da_DK.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/da_DK.ISO8859-15
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/da_DK.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/da_DK.UTF-8
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/da_DK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/de_AT.ISO8859-1
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_AT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/de_AT.ISO8859-15
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_AT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/de_AT.UTF-8
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_AT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/de_CH.ISO8859-1
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_CH.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/de_CH.ISO8859-15
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_CH.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/de_CH.UTF-8
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_CH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/de_DE.ISO8859-1
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_DE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/de_DE.ISO8859-15
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_DE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/de_DE.UTF-8
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/de_DE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/el_GR.ISO8859-7
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_COLLATE
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_CTYPE
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_MESSAGES
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_MONETARY
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_NUMERIC
OLD_FILES+=usr/share/locale/el_GR.ISO8859-7/LC_TIME
OLD_DIRS+=usr/share/locale/el_GR.UTF-8
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/el_GR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.ISO8859-1
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.ISO8859-15
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.US-ASCII
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_AU.UTF-8
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_AU.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.ISO8859-1
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.ISO8859-15
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.US-ASCII
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_CA.UTF-8
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_CA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.ISO8859-1
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.ISO8859-15
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.US-ASCII
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_GB.UTF-8
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_GB.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_HK.ISO8859-1
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_HK.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_HK.UTF-8
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_HK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_IE.ISO8859-1
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_IE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_IE.ISO8859-15
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_IE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_IE.UTF-8
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_IE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.ISO8859-1
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.ISO8859-15
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.US-ASCII
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_NZ.UTF-8
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_NZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_PH.UTF-8
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_PH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_SG.ISO8859-1
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_SG.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_SG.UTF-8
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_SG.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.ISO8859-1
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.ISO8859-15
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.US-ASCII
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_US.UTF-8
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_US.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.ISO8859-1
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.ISO8859-15
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.US-ASCII
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.US-ASCII/LC_TIME
OLD_DIRS+=usr/share/locale/en_ZA.UTF-8
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/en_ZA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_AR.ISO8859-1
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_AR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/es_AR.UTF-8
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_AR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_CR.UTF-8
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_CR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_ES.ISO8859-1
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_ES.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/es_ES.ISO8859-15
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_ES.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/es_ES.UTF-8
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_ES.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/es_MX.ISO8859-1
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_MX.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/es_MX.UTF-8
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/es_MX.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/et_EE.ISO8859-1
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/et_EE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/et_EE.ISO8859-15
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/et_EE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/et_EE.UTF-8
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/et_EE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/eu_ES.ISO8859-1
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/eu_ES.ISO8859-15
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/eu_ES.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/eu_ES.UTF-8
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/eu_ES.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fi_FI.ISO8859-1
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fi_FI.ISO8859-15
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fi_FI.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fi_FI.UTF-8
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fi_FI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_BE.ISO8859-1
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_BE.ISO8859-15
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_BE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_BE.UTF-8
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_BE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CA.ISO8859-1
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CA.ISO8859-15
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CA.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CA.UTF-8
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CH.ISO8859-1
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CH.ISO8859-15
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CH.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_CH.UTF-8
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_CH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/fr_FR.ISO8859-1
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/fr_FR.ISO8859-15
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_FR.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/fr_FR.UTF-8
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/fr_FR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/he_IL.UTF-8
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/he_IL.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hi_IN.ISCII-DEV
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_COLLATE
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_CTYPE
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_MESSAGES
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_MONETARY
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_NUMERIC
OLD_FILES+=usr/share/locale/hi_IN.ISCII-DEV/LC_TIME
OLD_DIRS+=usr/share/locale/hi_IN.UTF-8
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hi_IN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hr_HR.ISO8859-2
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/hr_HR.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/hr_HR.UTF-8
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hr_HR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hu_HU.ISO8859-2
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/hu_HU.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/hu_HU.UTF-8
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hu_HU.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/hy_AM.ARMSCII-8
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hy_AM.ARMSCII-8/LC_TIME
OLD_DIRS+=usr/share/locale/hy_AM.UTF-8
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/hy_AM.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/is_IS.ISO8859-1
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/is_IS.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/is_IS.ISO8859-15
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/is_IS.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/is_IS.UTF-8
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/is_IS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/it_CH.ISO8859-1
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_CH.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/it_CH.ISO8859-15
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_CH.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/it_CH.UTF-8
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_CH.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/it_IT.ISO8859-1
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_IT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/it_IT.ISO8859-15
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_IT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/it_IT.UTF-8
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/it_IT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ja_JP.eucJP
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_COLLATE
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_CTYPE
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_MESSAGES
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_MONETARY
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_NUMERIC
OLD_FILES+=usr/share/locale/ja_JP.eucJP/LC_TIME
OLD_DIRS+=usr/share/locale/ja_JP.SJIS
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_COLLATE
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_CTYPE
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_MESSAGES
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_MONETARY
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_NUMERIC
OLD_FILES+=usr/share/locale/ja_JP.SJIS/LC_TIME
OLD_DIRS+=usr/share/locale/ja_JP.UTF-8
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ja_JP.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/kk_KZ.UTF-8
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/kk_KZ.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ko_KR.CP949
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_COLLATE
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_CTYPE
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_MESSAGES
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_MONETARY
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_NUMERIC
OLD_FILES+=usr/share/locale/ko_KR.CP949/LC_TIME
OLD_DIRS+=usr/share/locale/ko_KR.eucKR
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_COLLATE
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_CTYPE
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_MESSAGES
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_MONETARY
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_NUMERIC
OLD_FILES+=usr/share/locale/ko_KR.eucKR/LC_TIME
OLD_DIRS+=usr/share/locale/ko_KR.UTF-8
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ko_KR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/lt_LT.ISO8859-13
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_NUMERIC
OLD_FILES+=usr/share/locale/lt_LT.ISO8859-13/LC_TIME
OLD_DIRS+=usr/share/locale/lt_LT.UTF-8
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/lt_LT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/lv_LV.ISO8859-13
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_COLLATE
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_CTYPE
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_MESSAGES
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_MONETARY
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_NUMERIC
OLD_FILES+=usr/share/locale/lv_LV.ISO8859-13/LC_TIME
OLD_DIRS+=usr/share/locale/lv_LV.UTF-8
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/lv_LV.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/mn_MN.UTF-8
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/mn_MN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nb_NO.ISO8859-1
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nb_NO.ISO8859-15
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nb_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nb_NO.UTF-8
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nb_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nl_BE.ISO8859-1
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nl_BE.ISO8859-15
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_BE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nl_BE.UTF-8
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_BE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nl_NL.ISO8859-1
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nl_NL.ISO8859-15
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_NL.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nl_NL.UTF-8
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nl_NL.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/nn_NO.ISO8859-1
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/nn_NO.ISO8859-15
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/nn_NO.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/nn_NO.UTF-8
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/nn_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/pl_PL.ISO8859-2
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/pl_PL.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/pl_PL.UTF-8
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/pl_PL.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/pt_BR.ISO8859-1
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_BR.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/pt_BR.UTF-8
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_BR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/pt_PT.ISO8859-1
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/pt_PT.ISO8859-15
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_PT.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/pt_PT.UTF-8
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/pt_PT.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ro_RO.ISO8859-2
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/ro_RO.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/ro_RO.UTF-8
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ro_RO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.CP1251
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.CP866
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.CP866/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.ISO8859-5
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.KOI8-R
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.KOI8-R/LC_TIME
OLD_DIRS+=usr/share/locale/ru_RU.UTF-8
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/ru_RU.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/se_FI.UTF-8
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/se_FI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/se_NO.UTF-8
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/se_NO.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sk_SK.ISO8859-2
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sk_SK.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sk_SK.UTF-8
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sk_SK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sl_SI.ISO8859-2
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sl_SI.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sl_SI.UTF-8
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sl_SI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.ISO8859-2
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-2/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.ISO8859-5
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.UTF-8
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sr_RS.UTF-8@latin
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_COLLATE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_CTYPE
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_MESSAGES
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_MONETARY
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_NUMERIC
OLD_FILES+=usr/share/locale/sr_RS.UTF-8@latin/LC_TIME
OLD_DIRS+=usr/share/locale/sv_FI.ISO8859-1
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/sv_FI.ISO8859-15
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_FI.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/sv_FI.UTF-8
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_FI.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/sv_SE.ISO8859-1
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-1/LC_TIME
OLD_DIRS+=usr/share/locale/sv_SE.ISO8859-15
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_SE.ISO8859-15/LC_TIME
OLD_DIRS+=usr/share/locale/sv_SE.UTF-8
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/sv_SE.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/tr_TR.ISO8859-9
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_COLLATE
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_CTYPE
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_MESSAGES
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_MONETARY
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_NUMERIC
OLD_FILES+=usr/share/locale/tr_TR.ISO8859-9/LC_TIME
OLD_DIRS+=usr/share/locale/tr_TR.UTF-8
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/tr_TR.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.CP1251
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.CP1251/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.ISO8859-5
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.ISO8859-5/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.KOI8-U
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.KOI8-U/LC_TIME
OLD_DIRS+=usr/share/locale/uk_UA.UTF-8
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/uk_UA.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.eucCN
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.eucCN/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.GB18030
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.GB18030/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.GB2312
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.GB2312/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.GBK
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.GBK/LC_TIME
OLD_DIRS+=usr/share/locale/zh_CN.UTF-8
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_CN.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_HK.UTF-8
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_HK.UTF-8/LC_TIME
OLD_DIRS+=usr/share/locale/zh_TW.Big5
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_TW.Big5/LC_TIME
OLD_DIRS+=usr/share/locale/zh_TW.UTF-8
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_COLLATE
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_CTYPE
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_MESSAGES
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_MONETARY
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_NUMERIC
OLD_FILES+=usr/share/locale/zh_TW.UTF-8/LC_TIME
.endif
.if ${MK_LOCATE} == no
OLD_FILES+=etc/locate.rc
OLD_FILES+=etc/periodic/weekly/310.locate
OLD_FILES+=usr/bin/locate
OLD_FILES+=usr/libexec/locate.bigram
OLD_FILES+=usr/libexec/locate.code
OLD_FILES+=usr/libexec/locate.concatdb
OLD_FILES+=usr/libexec/locate.mklocatedb
OLD_FILES+=usr/libexec/locate.updatedb
OLD_FILES+=usr/share/man/man1/locate.1.gz
OLD_FILES+=usr/share/man/man8/locate.updatedb.8.gz
OLD_FILES+=usr/share/man/man8/updatedb.8.gz
.endif
.if ${MK_LPR} == no
OLD_FILES+=etc/hosts.lpd
OLD_FILES+=etc/printcap
OLD_FILES+=etc/newsyslog.conf.d/lpr.conf
OLD_FILES+=etc/rc.d/lpd
OLD_FILES+=etc/syslog.d/lpr.conf
OLD_FILES+=usr/bin/lp
OLD_FILES+=usr/bin/lpq
OLD_FILES+=usr/bin/lpr
OLD_FILES+=usr/bin/lprm
OLD_FILES+=usr/libexec/lpr/ru/bjc-240.sh.sample
OLD_FILES+=usr/libexec/lpr/ru/koi2alt
OLD_FILES+=usr/libexec/lpr/ru/koi2855
OLD_DIRS+=usr/libexec/lpr/ru
OLD_FILES+=usr/libexec/lpr/lpf
OLD_DIRS+=usr/libexec/lpr
OLD_FILES+=usr/sbin/chkprintcap
OLD_FILES+=usr/sbin/lpc
OLD_FILES+=usr/sbin/lpd
OLD_FILES+=usr/sbin/lptest
OLD_FILES+=usr/sbin/pac
OLD_FILES+=usr/share/doc/smm/07.lpd/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/07.lpd
OLD_FILES+=usr/share/examples/etc/hosts.lpd
OLD_FILES+=usr/share/examples/etc/printcap
OLD_FILES+=usr/share/man/man1/lp.1.gz
OLD_FILES+=usr/share/man/man1/lpq.1.gz
OLD_FILES+=usr/share/man/man1/lpr.1.gz
OLD_FILES+=usr/share/man/man1/lprm.1.gz
OLD_FILES+=usr/share/man/man1/lptest.1.gz
OLD_FILES+=usr/share/man/man5/printcap.5.gz
OLD_FILES+=usr/share/man/man8/chkprintcap.8.gz
OLD_FILES+=usr/share/man/man8/lpc.8.gz
OLD_FILES+=usr/share/man/man8/lpd.8.gz
OLD_FILES+=usr/share/man/man8/pac.8.gz
.endif
.if ${MK_MAIL} == no
OLD_FILES+=etc/aliases
OLD_FILES+=etc/mail.rc
OLD_FILES+=etc/mail/aliases
OLD_FILES+=etc/mail/mailer.conf
OLD_FILES+=etc/periodic/daily/130.clean-msgs
OLD_FILES+=usr/bin/Mail
OLD_FILES+=usr/bin/biff
OLD_FILES+=usr/bin/from
OLD_FILES+=usr/bin/mail
OLD_FILES+=usr/bin/mailx
OLD_FILES+=usr/bin/msgs
OLD_FILES+=usr/libexec/comsat
OLD_FILES+=usr/share/examples/etc/mail.rc
OLD_FILES+=usr/share/man/man1/Mail.1.gz
OLD_FILES+=usr/share/man/man1/biff.1.gz
OLD_FILES+=usr/share/man/man1/from.1.gz
OLD_FILES+=usr/share/man/man1/mail.1.gz
OLD_FILES+=usr/share/man/man1/mailx.1.gz
OLD_FILES+=usr/share/man/man1/msgs.1.gz
OLD_FILES+=usr/share/man/man8/comsat.8.gz
OLD_FILES+=usr/share/misc/mail.help
OLD_FILES+=usr/share/misc/mail.tildehelp
.endif
.if ${MK_MAILWRAPPER} == no
OLD_FILES+=etc/mail/mailer.conf
# Don't remove usr/sbin/mailwrapper; in the no-mailwrapper case:
# /usr/sbin/sendmail -> /usr/sbin/mailwrapper
# /usr/sbin/mailwrapper -> /usr/libexec/sendmail/sendmail
#OLD_FILES+=usr/sbin/mailwrapper
OLD_FILES+=usr/share/man/man8/mailwrapper.8.gz
.endif
.if ${MK_MAKE} == no
OLD_FILES+=usr/bin/make
OLD_FILES+=usr/share/man/man1/make.1.gz
OLD_FILES+=usr/share/mk/atf.test.mk
OLD_FILES+=usr/share/mk/bsd.README
OLD_FILES+=usr/share/mk/bsd.arch.inc.mk
OLD_FILES+=usr/share/mk/bsd.compiler.mk
OLD_FILES+=usr/share/mk/bsd.cpu.mk
OLD_FILES+=usr/share/mk/bsd.crunchgen.mk
OLD_FILES+=usr/share/mk/bsd.dep.mk
OLD_FILES+=usr/share/mk/bsd.doc.mk
OLD_FILES+=usr/share/mk/bsd.dtb.mk
OLD_FILES+=usr/share/mk/bsd.endian.mk
OLD_FILES+=usr/share/mk/bsd.files.mk
OLD_FILES+=usr/share/mk/bsd.incs.mk
OLD_FILES+=usr/share/mk/bsd.info.mk
OLD_FILES+=usr/share/mk/bsd.init.mk
OLD_FILES+=usr/share/mk/bsd.kmod.mk
OLD_FILES+=usr/share/mk/bsd.lib.mk
OLD_FILES+=usr/share/mk/bsd.libnames.mk
OLD_FILES+=usr/share/mk/bsd.links.mk
OLD_FILES+=usr/share/mk/bsd.man.mk
OLD_FILES+=usr/share/mk/bsd.mkopt.mk
OLD_FILES+=usr/share/mk/bsd.nls.mk
OLD_FILES+=usr/share/mk/bsd.obj.mk
OLD_FILES+=usr/share/mk/bsd.opts.mk
OLD_FILES+=usr/share/mk/bsd.own.mk
OLD_FILES+=usr/share/mk/bsd.port.mk
OLD_FILES+=usr/share/mk/bsd.port.options.mk
OLD_FILES+=usr/share/mk/bsd.port.post.mk
OLD_FILES+=usr/share/mk/bsd.port.pre.mk
OLD_FILES+=usr/share/mk/bsd.port.subdir.mk
OLD_FILES+=usr/share/mk/bsd.prog.mk
OLD_FILES+=usr/share/mk/bsd.progs.mk
OLD_FILES+=usr/share/mk/bsd.snmpmod.mk
OLD_FILES+=usr/share/mk/bsd.subdir.mk
OLD_FILES+=usr/share/mk/bsd.symver.mk
OLD_FILES+=usr/share/mk/bsd.sys.mk
OLD_FILES+=usr/share/mk/bsd.test.mk
OLD_FILES+=usr/share/mk/plain.test.mk
OLD_FILES+=usr/share/mk/suite.test.mk
OLD_FILES+=usr/share/mk/sys.mk
OLD_FILES+=usr/share/mk/tap.test.mk
OLD_FILES+=usr/share/mk/version_gen.awk
OLD_FILES+=usr/tests/usr.bin/bmake/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.status.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_44bsd_mod/libtest.a
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/archives/fmt_oldbsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/bmake/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t2/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/basic/t3/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/common.sh
OLD_FILES+=usr/tests/usr.bin/bmake/execution/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/ellipsis/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/empty/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/joberr/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/execution/plus/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/builtin/sh
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/meta/sh
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path/sh
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/path_select/shell
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/replace/shell
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/shell/select/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/TEST1.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/basic/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/TEST1.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/TEST2.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/TEST1.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/TEST2.a
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/suffixes/src_wild2/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/directive-t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.4
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.status.5
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/enl/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/funny-targets/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/syntax/semi/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t0/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t1/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/sysmk/t2/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/bmake/test-new.mk
OLD_FILES+=usr/tests/usr.bin/bmake/variables/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_M/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.status.3
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/bmake/variables/modifier_t/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.status.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/bmake/variables/opt_V/legacy_test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/bmake/variables/t0/legacy_test
.endif
.if ${MK_MAN} == no
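# MAN_FILES/MAN_DIRS are computed rather than hand-maintained: the !=
# assignments below run find(1) over ${DESTDIR} at parse time and strip
# the ${DESTDIR} prefix with sed(1).  The trailing "; echo" keeps the
# command's exit status zero (presumably so a missing or empty manual
# page tree does not make the assignment fail).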
MAN_FILES!=find ${DESTDIR}/usr/share/man ${DESTDIR}/usr/share/openssl/man -type f | sed -e 's,^${DESTDIR}/,,'; echo
OLD_FILES+=${MAN_FILES}
MAN_DIRS!=find ${DESTDIR}/usr/share/man ${DESTDIR}/usr/share/openssl/man -type d | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${MAN_DIRS}
.endif
.if ${MK_MAN_UTILS} == no
OLD_FILES+=etc/periodic/weekly/320.whatis
OLD_FILES+=usr/bin/apropos
OLD_FILES+=usr/bin/makewhatis
OLD_FILES+=usr/bin/man
OLD_FILES+=usr/bin/manpath
OLD_FILES+=usr/bin/whatis
OLD_FILES+=usr/libexec/makewhatis.local
OLD_FILES+=usr/sbin/manctl
OLD_FILES+=usr/share/man/man1/apropos.1.gz
OLD_FILES+=usr/share/man/man1/makewhatis.1.gz
OLD_FILES+=usr/share/man/man1/man.1.gz
OLD_FILES+=usr/share/man/man1/manpath.1.gz
OLD_FILES+=usr/share/man/man1/whatis.1.gz
OLD_FILES+=usr/share/man/man5/man.conf.5.gz
OLD_FILES+=usr/share/man/man8/makewhatis.local.8.gz
OLD_FILES+=usr/share/man/man8/manctl.8.gz
OLD_FILES+=usr/share/man/whatis
OLD_FILES+=usr/share/openssl/man/whatis
.endif
.if ${MK_NDIS} == no
OLD_FILES+=usr/sbin/ndiscvt
OLD_FILES+=usr/sbin/ndisgen
OLD_FILES+=usr/share/man/man8/ndiscvt.8.gz
OLD_FILES+=usr/share/man/man8/ndisgen.8.gz
OLD_FILES+=usr/share/misc/windrv_stub.c
.endif
.if ${MK_NETCAT} == no
OLD_FILES+=rescue/nc
OLD_FILES+=usr/bin/nc
OLD_FILES+=usr/share/man/man1/nc.1.gz
.endif
.if ${MK_NETGRAPH} == no
OLD_FILES+=usr/include/netgraph.h
OLD_FILES+=usr/lib/libnetgraph.a
OLD_FILES+=usr/lib/libnetgraph.so
OLD_LIBS+=usr/lib/libnetgraph.so.4
OLD_FILES+=usr/lib/libnetgraph_p.a
OLD_FILES+=usr/lib32/libnetgraph.a
OLD_FILES+=usr/lib32/libnetgraph.so
OLD_LIBS+=usr/lib32/libnetgraph.so.4
OLD_FILES+=usr/lib32/libnetgraph_p.a
OLD_FILES+=usr/libexec/pppoed
OLD_FILES+=usr/sbin/flowctl
OLD_FILES+=usr/sbin/lmcconfig
OLD_FILES+=usr/sbin/ngctl
OLD_FILES+=usr/sbin/nghook
OLD_FILES+=usr/share/man/man3/NgAllocRecvAsciiMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgAllocRecvData.3.gz
OLD_FILES+=usr/share/man/man3/NgAllocRecvMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgMkSockNode.3.gz
OLD_FILES+=usr/share/man/man3/NgNameNode.3.gz
OLD_FILES+=usr/share/man/man3/NgRecvAsciiMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgRecvData.3.gz
OLD_FILES+=usr/share/man/man3/NgRecvMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSendAsciiMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSendData.3.gz
OLD_FILES+=usr/share/man/man3/NgSendMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSendReplyMsg.3.gz
OLD_FILES+=usr/share/man/man3/NgSetDebug.3.gz
OLD_FILES+=usr/share/man/man3/NgSetErrLog.3.gz
OLD_FILES+=usr/share/man/man3/netgraph.3.gz
OLD_FILES+=usr/share/man/man8/flowctl.8.gz
OLD_FILES+=usr/share/man/man8/lmcconfig.8.gz
OLD_FILES+=usr/share/man/man8/ngctl.8.gz
OLD_FILES+=usr/share/man/man8/nghook.8.gz
OLD_FILES+=usr/share/man/man8/pppoed.8.gz
.endif
.if ${MK_NETGRAPH_SUPPORT} == no
OLD_FILES+=usr/include/bsnmp/snmp_netgraph.h
OLD_FILES+=usr/lib/snmp_netgraph.so
OLD_LIBS+=usr/lib/snmp_netgraph.so.6
OLD_FILES+=usr/share/man/man3/snmp_netgraph.3.gz
OLD_FILES+=usr/share/snmp/defs/netgraph_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-NETGRAPH.txt
.endif
.if ${MK_NIS} == no
OLD_FILES+=etc/rc.d/ypbind
OLD_FILES+=etc/rc.d/ypldap
OLD_FILES+=etc/rc.d/yppasswdd
OLD_FILES+=etc/rc.d/ypserv
OLD_FILES+=etc/rc.d/ypset
OLD_FILES+=etc/rc.d/ypupdated
OLD_FILES+=etc/rc.d/ypxfrd
OLD_FILES+=usr/bin/ypcat
OLD_FILES+=usr/bin/ypchfn
OLD_FILES+=usr/bin/ypchpass
OLD_FILES+=usr/bin/ypchsh
OLD_FILES+=usr/bin/ypmatch
OLD_FILES+=usr/bin/yppasswd
OLD_FILES+=usr/bin/ypwhich
OLD_FILES+=usr/include/ypclnt.h
OLD_FILES+=usr/lib/libypclnt.a
OLD_FILES+=usr/lib/libypclnt.so
OLD_LIBS+=usr/lib/libypclnt.so.4
OLD_FILES+=usr/lib/libypclnt_p.a
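# The lib32 compatibility files only exist on the 64-bit targets that
# build lib32, hence the TARGET_ARCH guard below (assumed to match the
# set of lib32-capable targets at the time of this revision).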
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libypclnt.a
OLD_FILES+=usr/lib32/libypclnt.so
OLD_LIBS+=usr/lib32/libypclnt.so.4
OLD_FILES+=usr/lib32/libypclnt_p.a
.endif
OLD_FILES+=usr/libexec/mknetid
OLD_FILES+=usr/libexec/yppwupdate
OLD_FILES+=usr/libexec/ypxfr
OLD_FILES+=usr/sbin/rpc.yppasswdd
OLD_FILES+=usr/sbin/rpc.ypupdated
OLD_FILES+=usr/sbin/rpc.ypxfrd
OLD_FILES+=usr/sbin/yp_mkdb
OLD_FILES+=usr/sbin/ypbind
OLD_FILES+=usr/sbin/ypinit
OLD_FILES+=usr/sbin/yppoll
OLD_FILES+=usr/sbin/yppush
OLD_FILES+=usr/sbin/ypserv
OLD_FILES+=usr/sbin/ypset
OLD_FILES+=usr/share/man/man1/ypcat.1.gz
OLD_FILES+=usr/share/man/man1/ypchfn.1.gz
OLD_FILES+=usr/share/man/man1/ypchpass.1.gz
OLD_FILES+=usr/share/man/man1/ypchsh.1.gz
OLD_FILES+=usr/share/man/man1/ypmatch.1.gz
OLD_FILES+=usr/share/man/man1/yppasswd.1.gz
OLD_FILES+=usr/share/man/man1/ypwhich.1.gz
OLD_FILES+=usr/share/man/man5/netid.5.gz
OLD_FILES+=usr/share/man/man8/mknetid.8.gz
OLD_FILES+=usr/share/man/man8/rpc.yppasswdd.8.gz
OLD_FILES+=usr/share/man/man8/rpc.ypxfrd.8.gz
OLD_FILES+=usr/share/man/man8/NIS.8.gz
OLD_FILES+=usr/share/man/man8/YP.8.gz
OLD_FILES+=usr/share/man/man8/nis.8.gz
OLD_FILES+=usr/share/man/man8/yp.8.gz
OLD_FILES+=usr/share/man/man8/yp_mkdb.8.gz
OLD_FILES+=usr/share/man/man8/ypbind.8.gz
OLD_FILES+=usr/share/man/man8/ypinit.8.gz
OLD_FILES+=usr/share/man/man8/yppoll.8.gz
OLD_FILES+=usr/share/man/man8/yppush.8.gz
OLD_FILES+=usr/share/man/man8/ypserv.8.gz
OLD_FILES+=usr/share/man/man8/ypset.8.gz
OLD_FILES+=usr/share/man/man8/ypxfr.8.gz
OLD_FILES+=var/yp/Makefile
OLD_FILES+=var/yp/Makefile.dist
.endif
.if ${MK_NLS} == no
OLD_FILES+=usr/share/mk/bsd.nls.mk
OLD_DIRS+=usr/share/nls
OLD_DIRS+=usr/share/nls/C
OLD_FILES+=usr/share/nls/C/ee.cat
OLD_DIRS+=usr/share/nls/af_ZA.ISO8859-1
OLD_DIRS+=usr/share/nls/af_ZA.ISO8859-15
OLD_DIRS+=usr/share/nls/af_ZA.UTF-8
OLD_DIRS+=usr/share/nls/am_ET.UTF-8
OLD_DIRS+=usr/share/nls/be_BY.CP1131
OLD_DIRS+=usr/share/nls/be_BY.CP1251
OLD_DIRS+=usr/share/nls/be_BY.ISO8859-5
OLD_DIRS+=usr/share/nls/be_BY.UTF-8
OLD_FILES+=usr/share/nls/be_BY.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/bg_BG.CP1251
OLD_DIRS+=usr/share/nls/bg_BG.UTF-8
OLD_DIRS+=usr/share/nls/ca_ES.ISO8859-1
OLD_FILES+=usr/share/nls/ca_ES.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/ca_ES.ISO8859-15
OLD_DIRS+=usr/share/nls/ca_ES.UTF-8
OLD_DIRS+=usr/share/nls/cs_CZ.ISO8859-2
OLD_DIRS+=usr/share/nls/cs_CZ.UTF-8
OLD_DIRS+=usr/share/nls/da_DK.ISO8859-1
OLD_DIRS+=usr/share/nls/da_DK.ISO8859-15
OLD_DIRS+=usr/share/nls/da_DK.UTF-8
OLD_DIRS+=usr/share/nls/de_AT.ISO8859-1
OLD_FILES+=usr/share/nls/de_AT.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/de_AT.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/de_AT.ISO8859-15
OLD_FILES+=usr/share/nls/de_AT.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/de_AT.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/de_AT.UTF-8
OLD_FILES+=usr/share/nls/de_AT.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/de_CH.ISO8859-1
OLD_FILES+=usr/share/nls/de_CH.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/de_CH.ISO8859-15
OLD_FILES+=usr/share/nls/de_CH.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/de_CH.UTF-8
OLD_FILES+=usr/share/nls/de_CH.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/de_DE.ISO8859-1
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/de_DE.ISO8859-15
OLD_FILES+=usr/share/nls/de_DE.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/de_DE.UTF-8
OLD_FILES+=usr/share/nls/de_DE.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/el_GR.ISO8859-7
OLD_FILES+=usr/share/nls/el_GR.ISO8859-7/libc.cat
OLD_FILES+=usr/share/nls/el_GR.ISO8859-7/tcsh.cat
OLD_DIRS+=usr/share/nls/el_GR.UTF-8
OLD_FILES+=usr/share/nls/el_GR.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/en_AU.ISO8859-1
OLD_DIRS+=usr/share/nls/en_AU.ISO8859-15
OLD_DIRS+=usr/share/nls/en_AU.US-ASCII
OLD_DIRS+=usr/share/nls/en_AU.UTF-8
OLD_DIRS+=usr/share/nls/en_CA.ISO8859-1
OLD_DIRS+=usr/share/nls/en_CA.ISO8859-15
OLD_DIRS+=usr/share/nls/en_CA.US-ASCII
OLD_DIRS+=usr/share/nls/en_CA.UTF-8
OLD_DIRS+=usr/share/nls/en_GB.ISO8859-1
OLD_DIRS+=usr/share/nls/en_GB.ISO8859-15
OLD_DIRS+=usr/share/nls/en_GB.US-ASCII
OLD_DIRS+=usr/share/nls/en_GB.UTF-8
OLD_DIRS+=usr/share/nls/en_IE.UTF-8
OLD_DIRS+=usr/share/nls/en_NZ.ISO8859-1
OLD_DIRS+=usr/share/nls/en_NZ.ISO8859-15
OLD_DIRS+=usr/share/nls/en_NZ.US-ASCII
OLD_DIRS+=usr/share/nls/en_NZ.UTF-8
OLD_DIRS+=usr/share/nls/en_US.ISO8859-1
OLD_FILES+=usr/share/nls/en_US.ISO8859-1/ee.cat
OLD_DIRS+=usr/share/nls/en_US.ISO8859-15
OLD_FILES+=usr/share/nls/en_US.ISO8859-15/ee.cat
OLD_DIRS+=usr/share/nls/en_US.UTF-8
OLD_DIRS+=usr/share/nls/es_ES.ISO8859-1
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/es_ES.ISO8859-15
OLD_FILES+=usr/share/nls/es_ES.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/es_ES.UTF-8
OLD_FILES+=usr/share/nls/es_ES.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/et_EE.ISO8859-15
OLD_FILES+=usr/share/nls/et_EE.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/et_EE.UTF-8
OLD_FILES+=usr/share/nls/et_EE.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fi_FI.ISO8859-1
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fi_FI.ISO8859-15
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fi_FI.UTF-8
OLD_FILES+=usr/share/nls/fi_FI.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_BE.ISO8859-1
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_BE.ISO8859-15
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_BE.UTF-8
OLD_FILES+=usr/share/nls/fr_BE.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CA.ISO8859-1
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CA.ISO8859-15
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CA.UTF-8
OLD_FILES+=usr/share/nls/fr_CA.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CH.ISO8859-1
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CH.ISO8859-15
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_CH.UTF-8
OLD_FILES+=usr/share/nls/fr_CH.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_FR.ISO8859-1
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/libc.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_FR.ISO8859-15
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-15/ee.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/fr_FR.UTF-8
OLD_FILES+=usr/share/nls/fr_FR.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/gl_ES.ISO8859-1
OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/gl_ES.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/he_IL.UTF-8
OLD_DIRS+=usr/share/nls/hi_IN.ISCII-DEV
OLD_DIRS+=usr/share/nls/hr_HR.ISO8859-2
OLD_DIRS+=usr/share/nls/hr_HR.UTF-8
OLD_DIRS+=usr/share/nls/hu_HU.ISO8859-2
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/grep.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/libc.cat
OLD_FILES+=usr/share/nls/hu_HU.ISO8859-2/sort.cat
OLD_DIRS+=usr/share/nls/hu_HU.UTF-8
OLD_DIRS+=usr/share/nls/hy_AM.ARMSCII-8
OLD_DIRS+=usr/share/nls/hy_AM.UTF-8
OLD_DIRS+=usr/share/nls/is_IS.ISO8859-1
OLD_DIRS+=usr/share/nls/is_IS.ISO8859-15
OLD_DIRS+=usr/share/nls/is_IS.UTF-8
OLD_DIRS+=usr/share/nls/it_CH.ISO8859-1
OLD_FILES+=usr/share/nls/it_CH.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/it_CH.ISO8859-15
OLD_FILES+=usr/share/nls/it_CH.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/it_CH.UTF-8
OLD_FILES+=usr/share/nls/it_CH.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/it_IT.ISO8859-1
OLD_FILES+=usr/share/nls/it_IT.ISO8859-1/tcsh.cat
OLD_DIRS+=usr/share/nls/it_IT.ISO8859-15
OLD_FILES+=usr/share/nls/it_IT.ISO8859-15/libc.cat
OLD_FILES+=usr/share/nls/it_IT.ISO8859-15/tcsh.cat
OLD_DIRS+=usr/share/nls/it_IT.UTF-8
OLD_FILES+=usr/share/nls/it_IT.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/ja_JP.SJIS
OLD_FILES+=usr/share/nls/ja_JP.SJIS/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/tcsh.cat
OLD_DIRS+=usr/share/nls/ja_JP.UTF-8
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/libc.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/ja_JP.eucJP
OLD_FILES+=usr/share/nls/ja_JP.eucJP/grep.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/libc.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/tcsh.cat
OLD_DIRS+=usr/share/nls/kk_KZ.PT154
OLD_DIRS+=usr/share/nls/kk_KZ.UTF-8
OLD_DIRS+=usr/share/nls/ko_KR.CP949
OLD_DIRS+=usr/share/nls/ko_KR.UTF-8
OLD_FILES+=usr/share/nls/ko_KR.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/ko_KR.eucKR
OLD_FILES+=usr/share/nls/ko_KR.eucKR/libc.cat
OLD_DIRS+=usr/share/nls/lt_LT.ISO8859-13
OLD_DIRS+=usr/share/nls/lt_LT.UTF-8
OLD_DIRS+=usr/share/nls/lv_LV.ISO8859-13
OLD_DIRS+=usr/share/nls/lv_LV.UTF-8
OLD_DIRS+=usr/share/nls/mn_MN.UTF-8
OLD_FILES+=usr/share/nls/mn_MN.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/nl_BE.ISO8859-1
OLD_DIRS+=usr/share/nls/nl_BE.ISO8859-15
OLD_DIRS+=usr/share/nls/nl_BE.UTF-8
OLD_DIRS+=usr/share/nls/nl_NL.ISO8859-1
OLD_FILES+=usr/share/nls/nl_NL.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/nl_NL.ISO8859-15
OLD_DIRS+=usr/share/nls/nl_NL.UTF-8
OLD_DIRS+=usr/share/nls/no_NO.ISO8859-1
OLD_FILES+=usr/share/nls/no_NO.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/no_NO.ISO8859-15
OLD_DIRS+=usr/share/nls/no_NO.UTF-8
OLD_DIRS+=usr/share/nls/pl_PL.ISO8859-2
OLD_FILES+=usr/share/nls/pl_PL.ISO8859-2/ee.cat
OLD_FILES+=usr/share/nls/pl_PL.ISO8859-2/libc.cat
OLD_DIRS+=usr/share/nls/pl_PL.UTF-8
OLD_DIRS+=usr/share/nls/pt_BR.ISO8859-1
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/ee.cat
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/grep.cat
OLD_FILES+=usr/share/nls/pt_BR.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/pt_BR.UTF-8
OLD_DIRS+=usr/share/nls/pt_PT.ISO8859-1
OLD_FILES+=usr/share/nls/pt_PT.ISO8859-1/ee.cat
OLD_DIRS+=usr/share/nls/pt_PT.ISO8859-15
OLD_DIRS+=usr/share/nls/pt_PT.UTF-8
OLD_DIRS+=usr/share/nls/ro_RO.ISO8859-2
OLD_DIRS+=usr/share/nls/ro_RO.UTF-8
OLD_DIRS+=usr/share/nls/ru_RU.CP1251
OLD_FILES+=usr/share/nls/ru_RU.CP1251/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.CP866
OLD_FILES+=usr/share/nls/ru_RU.CP866/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.ISO8859-5
OLD_FILES+=usr/share/nls/ru_RU.ISO8859-5/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.KOI8-R
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/ee.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/grep.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/libc.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/tcsh.cat
OLD_DIRS+=usr/share/nls/ru_RU.UTF-8
OLD_FILES+=usr/share/nls/ru_RU.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/sk_SK.ISO8859-2
OLD_FILES+=usr/share/nls/sk_SK.ISO8859-2/libc.cat
OLD_DIRS+=usr/share/nls/sk_SK.UTF-8
OLD_DIRS+=usr/share/nls/sl_SI.ISO8859-2
OLD_DIRS+=usr/share/nls/sl_SI.UTF-8
OLD_DIRS+=usr/share/nls/sr_YU.ISO8859-2
OLD_DIRS+=usr/share/nls/sr_YU.ISO8859-5
OLD_DIRS+=usr/share/nls/sr_YU.UTF-8
OLD_DIRS+=usr/share/nls/sv_SE.ISO8859-1
OLD_FILES+=usr/share/nls/sv_SE.ISO8859-1/libc.cat
OLD_DIRS+=usr/share/nls/sv_SE.ISO8859-15
OLD_DIRS+=usr/share/nls/sv_SE.UTF-8
OLD_DIRS+=usr/share/nls/tr_TR.ISO8859-9
OLD_DIRS+=usr/share/nls/tr_TR.UTF-8
OLD_DIRS+=usr/share/nls/uk_UA.ISO8859-5
OLD_FILES+=usr/share/nls/uk_UA.ISO8859-5/tcsh.cat
OLD_DIRS+=usr/share/nls/uk_UA.KOI8-U
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/ee.cat
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/tcsh.cat
OLD_DIRS+=usr/share/nls/uk_UA.UTF-8
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/libc.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/tcsh.cat
OLD_DIRS+=usr/share/nls/zh_CN.GB18030
OLD_FILES+=usr/share/nls/zh_CN.GB18030/libc.cat
OLD_DIRS+=usr/share/nls/zh_CN.GB2312
OLD_FILES+=usr/share/nls/zh_CN.GB2312/libc.cat
OLD_DIRS+=usr/share/nls/zh_CN.GBK
OLD_DIRS+=usr/share/nls/zh_CN.UTF-8
OLD_FILES+=usr/share/nls/zh_CN.UTF-8/grep.cat
OLD_FILES+=usr/share/nls/zh_CN.UTF-8/libc.cat
OLD_DIRS+=usr/share/nls/zh_CN.eucCN
OLD_DIRS+=usr/share/nls/zh_HK.UTF-8
OLD_DIRS+=usr/share/nls/zh_TW.UTF-8
OLD_FILES+=usr/tests/bin/sh/builtins/locale1.0
.endif
.if ${MK_NLS_CATALOGS} == no
OLD_FILES+=usr/share/nls/de_AT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/el_GR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/et_EE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP1251/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP866/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/tcsh.cat
.endif
.if ${MK_NS_CACHING} == no
OLD_FILES+=etc/nscd.conf
OLD_FILES+=etc/rc.d/nscd
OLD_FILES+=usr/sbin/nscd
OLD_FILES+=usr/share/examples/etc/nscd.conf
OLD_FILES+=usr/share/man/man5/nscd.conf.5.gz
OLD_FILES+=usr/share/man/man8/nscd.8.gz
.endif
.if ${MK_NTP} == no
OLD_FILES+=etc/ntp.conf
OLD_FILES+=etc/periodic/daily/480.status-ntpd
OLD_FILES+=usr/bin/ntpq
OLD_FILES+=usr/sbin/ntp-keygen
OLD_FILES+=usr/sbin/ntpd
OLD_FILES+=usr/sbin/ntpdate
OLD_FILES+=usr/sbin/ntpdc
OLD_FILES+=usr/sbin/ntptime
OLD_FILES+=usr/sbin/sntp
OLD_FILES+=usr/share/doc/ntp/access.html
OLD_FILES+=usr/share/doc/ntp/accopt.html
OLD_FILES+=usr/share/doc/ntp/assoc.html
OLD_FILES+=usr/share/doc/ntp/audio.html
OLD_FILES+=usr/share/doc/ntp/authentic.html
OLD_FILES+=usr/share/doc/ntp/authopt.html
OLD_FILES+=usr/share/doc/ntp/autokey.html
OLD_FILES+=usr/share/doc/ntp/bugs.html
OLD_FILES+=usr/share/doc/ntp/build.html
OLD_FILES+=usr/share/doc/ntp/clock.html
OLD_FILES+=usr/share/doc/ntp/clockopt.html
OLD_FILES+=usr/share/doc/ntp/cluster.html
OLD_FILES+=usr/share/doc/ntp/comdex.html
OLD_FILES+=usr/share/doc/ntp/config.html
OLD_FILES+=usr/share/doc/ntp/confopt.html
OLD_FILES+=usr/share/doc/ntp/copyright.html
OLD_FILES+=usr/share/doc/ntp/debug.html
OLD_FILES+=usr/share/doc/ntp/decode.html
OLD_FILES+=usr/share/doc/ntp/discipline.html
OLD_FILES+=usr/share/doc/ntp/discover.html
OLD_FILES+=usr/share/doc/ntp/driver1.html
OLD_FILES+=usr/share/doc/ntp/driver10.html
OLD_FILES+=usr/share/doc/ntp/driver11.html
OLD_FILES+=usr/share/doc/ntp/driver12.html
OLD_FILES+=usr/share/doc/ntp/driver16.html
OLD_FILES+=usr/share/doc/ntp/driver18.html
OLD_FILES+=usr/share/doc/ntp/driver19.html
OLD_FILES+=usr/share/doc/ntp/driver2.html
OLD_FILES+=usr/share/doc/ntp/driver20.html
OLD_FILES+=usr/share/doc/ntp/driver22.html
OLD_FILES+=usr/share/doc/ntp/driver26.html
OLD_FILES+=usr/share/doc/ntp/driver27.html
OLD_FILES+=usr/share/doc/ntp/driver28.html
OLD_FILES+=usr/share/doc/ntp/driver29.html
OLD_FILES+=usr/share/doc/ntp/driver3.html
OLD_FILES+=usr/share/doc/ntp/driver30.html
OLD_FILES+=usr/share/doc/ntp/driver32.html
OLD_FILES+=usr/share/doc/ntp/driver33.html
OLD_FILES+=usr/share/doc/ntp/driver34.html
OLD_FILES+=usr/share/doc/ntp/driver35.html
OLD_FILES+=usr/share/doc/ntp/driver36.html
OLD_FILES+=usr/share/doc/ntp/driver37.html
OLD_FILES+=usr/share/doc/ntp/driver4.html
OLD_FILES+=usr/share/doc/ntp/driver5.html
OLD_FILES+=usr/share/doc/ntp/driver6.html
OLD_FILES+=usr/share/doc/ntp/driver7.html
OLD_FILES+=usr/share/doc/ntp/driver8.html
OLD_FILES+=usr/share/doc/ntp/driver9.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver1.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver10.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver11.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver12.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver16.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver18.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver19.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver20.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver22.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver26.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver27.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver28.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver29.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver3.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver30.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver31.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver32.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver33.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver34.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver35.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver36.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver37.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver38.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver39.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver4.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver40.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver42.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver43.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver44.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver45.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver46.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver5.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver6.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver7.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver8.html
OLD_FILES+=usr/share/doc/ntp/drivers/driver9.html
OLD_FILES+=usr/share/doc/ntp/drivers/icons/home.gif
OLD_FILES+=usr/share/doc/ntp/drivers/icons/mail2.gif
OLD_FILES+=usr/share/doc/ntp/drivers/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/drivers/oncore-shmem.html
OLD_FILES+=usr/share/doc/ntp/drivers/scripts/footer.txt
OLD_FILES+=usr/share/doc/ntp/drivers/scripts/style.css
OLD_FILES+=usr/share/doc/ntp/drivers/tf582_4.html
OLD_FILES+=usr/share/doc/ntp/extern.html
OLD_FILES+=usr/share/doc/ntp/filter.html
OLD_FILES+=usr/share/doc/ntp/hints.html
OLD_FILES+=usr/share/doc/ntp/hints/a-ux
OLD_FILES+=usr/share/doc/ntp/hints/aix
OLD_FILES+=usr/share/doc/ntp/hints/bsdi
OLD_FILES+=usr/share/doc/ntp/hints/changes
OLD_FILES+=usr/share/doc/ntp/hints/decosf1
OLD_FILES+=usr/share/doc/ntp/hints/decosf2
OLD_FILES+=usr/share/doc/ntp/hints/freebsd
OLD_FILES+=usr/share/doc/ntp/hints/hpux
OLD_FILES+=usr/share/doc/ntp/hints/linux
OLD_FILES+=usr/share/doc/ntp/hints/mpeix
OLD_FILES+=usr/share/doc/ntp/hints/notes-xntp-v3
OLD_FILES+=usr/share/doc/ntp/hints/parse
OLD_FILES+=usr/share/doc/ntp/hints/refclocks
OLD_FILES+=usr/share/doc/ntp/hints/rs6000
OLD_FILES+=usr/share/doc/ntp/hints/sco.html
OLD_FILES+=usr/share/doc/ntp/hints/sgi
OLD_FILES+=usr/share/doc/ntp/hints/solaris-dosynctodr.html
OLD_FILES+=usr/share/doc/ntp/hints/solaris.html
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.4023118
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.4095849
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.S99ntpd
OLD_FILES+=usr/share/doc/ntp/hints/solaris.xtra.patchfreq
OLD_FILES+=usr/share/doc/ntp/hints/sun4
OLD_FILES+=usr/share/doc/ntp/hints/svr4-dell
OLD_FILES+=usr/share/doc/ntp/hints/svr4_package
OLD_FILES+=usr/share/doc/ntp/hints/todo
OLD_FILES+=usr/share/doc/ntp/hints/vxworks.html
OLD_FILES+=usr/share/doc/ntp/hints/winnt.html
OLD_FILES+=usr/share/doc/ntp/history.html
OLD_FILES+=usr/share/doc/ntp/howto.html
OLD_FILES+=usr/share/doc/ntp/huffpuff.html
OLD_FILES+=usr/share/doc/ntp/icons/home.gif
OLD_FILES+=usr/share/doc/ntp/icons/mail2.gif
OLD_FILES+=usr/share/doc/ntp/icons/sitemap.png
OLD_FILES+=usr/share/doc/ntp/index.html
OLD_FILES+=usr/share/doc/ntp/kern.html
OLD_FILES+=usr/share/doc/ntp/kernpps.html
OLD_FILES+=usr/share/doc/ntp/keygen.html
OLD_FILES+=usr/share/doc/ntp/ldisc.html
OLD_FILES+=usr/share/doc/ntp/leap.html
OLD_FILES+=usr/share/doc/ntp/measure.html
OLD_FILES+=usr/share/doc/ntp/miscopt.html
OLD_FILES+=usr/share/doc/ntp/monopt.html
OLD_FILES+=usr/share/doc/ntp/msyslog.html
OLD_FILES+=usr/share/doc/ntp/mx4200data.html
OLD_FILES+=usr/share/doc/ntp/notes.html
OLD_FILES+=usr/share/doc/ntp/ntp-keygen.html
OLD_FILES+=usr/share/doc/ntp/ntp-wait.html
OLD_FILES+=usr/share/doc/ntp/ntp.conf.html
OLD_FILES+=usr/share/doc/ntp/ntp.keys.html
OLD_FILES+=usr/share/doc/ntp/ntp_conf.html
OLD_FILES+=usr/share/doc/ntp/ntpd.html
OLD_FILES+=usr/share/doc/ntp/ntpdate.html
OLD_FILES+=usr/share/doc/ntp/ntpdc.html
OLD_FILES+=usr/share/doc/ntp/ntpdsim.html
OLD_FILES+=usr/share/doc/ntp/ntpdsim_new.html
OLD_FILES+=usr/share/doc/ntp/ntpq.html
OLD_FILES+=usr/share/doc/ntp/ntpsnmpd.html
OLD_FILES+=usr/share/doc/ntp/ntptime.html
OLD_FILES+=usr/share/doc/ntp/ntptrace.html
OLD_FILES+=usr/share/doc/ntp/orphan.html
OLD_FILES+=usr/share/doc/ntp/parsedata.html
OLD_FILES+=usr/share/doc/ntp/parsenew.html
OLD_FILES+=usr/share/doc/ntp/patches.html
OLD_FILES+=usr/share/doc/ntp/pic/9400n.jpg
OLD_FILES+=usr/share/doc/ntp/pic/alice11.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice13.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice15.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice23.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice31.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice32.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice35.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice38.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice44.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice47.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice51.gif
OLD_FILES+=usr/share/doc/ntp/pic/alice61.gif
OLD_FILES+=usr/share/doc/ntp/pic/barnstable.gif
OLD_FILES+=usr/share/doc/ntp/pic/beaver.gif
OLD_FILES+=usr/share/doc/ntp/pic/boom3.gif
OLD_FILES+=usr/share/doc/ntp/pic/boom3a.gif
OLD_FILES+=usr/share/doc/ntp/pic/boom4.gif
OLD_FILES+=usr/share/doc/ntp/pic/broad.gif
OLD_FILES+=usr/share/doc/ntp/pic/bustardfly.gif
OLD_FILES+=usr/share/doc/ntp/pic/c51.jpg
OLD_FILES+=usr/share/doc/ntp/pic/description.jpg
OLD_FILES+=usr/share/doc/ntp/pic/discipline.gif
OLD_FILES+=usr/share/doc/ntp/pic/dogsnake.gif
OLD_FILES+=usr/share/doc/ntp/pic/driver29.gif
OLD_FILES+=usr/share/doc/ntp/pic/driver43_1.gif
OLD_FILES+=usr/share/doc/ntp/pic/driver43_2.jpg
OLD_FILES+=usr/share/doc/ntp/pic/fg6021.gif
OLD_FILES+=usr/share/doc/ntp/pic/fg6039.jpg
OLD_FILES+=usr/share/doc/ntp/pic/fig_3_1.gif
OLD_FILES+=usr/share/doc/ntp/pic/flatheads.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt1.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt2.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt3.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt4.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt5.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt6.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt7.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt8.gif
OLD_FILES+=usr/share/doc/ntp/pic/flt9.gif
OLD_FILES+=usr/share/doc/ntp/pic/freq1211.gif
OLD_FILES+=usr/share/doc/ntp/pic/gadget.jpg
OLD_FILES+=usr/share/doc/ntp/pic/gps167.jpg
OLD_FILES+=usr/share/doc/ntp/pic/group.gif
OLD_FILES+=usr/share/doc/ntp/pic/hornraba.gif
OLD_FILES+=usr/share/doc/ntp/pic/igclock.gif
OLD_FILES+=usr/share/doc/ntp/pic/neoclock4x.gif
OLD_FILES+=usr/share/doc/ntp/pic/offset1211.gif
OLD_FILES+=usr/share/doc/ntp/pic/oncore_evalbig.gif
OLD_FILES+=usr/share/doc/ntp/pic/oncore_remoteant.jpg
OLD_FILES+=usr/share/doc/ntp/pic/oncore_utplusbig.gif
OLD_FILES+=usr/share/doc/ntp/pic/oz2.gif
OLD_FILES+=usr/share/doc/ntp/pic/panda.gif
OLD_FILES+=usr/share/doc/ntp/pic/pd_om006.gif
OLD_FILES+=usr/share/doc/ntp/pic/pd_om011.gif
OLD_FILES+=usr/share/doc/ntp/pic/peer.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo1a.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo3a.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo4.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo5.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo6.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo7.gif
OLD_FILES+=usr/share/doc/ntp/pic/pogo8.gif
OLD_FILES+=usr/share/doc/ntp/pic/pzf509.jpg
OLD_FILES+=usr/share/doc/ntp/pic/pzf511.jpg
OLD_FILES+=usr/share/doc/ntp/pic/rabbit.gif
OLD_FILES+=usr/share/doc/ntp/pic/radio2.jpg
OLD_FILES+=usr/share/doc/ntp/pic/sheepb.jpg
OLD_FILES+=usr/share/doc/ntp/pic/stack1a.jpg
OLD_FILES+=usr/share/doc/ntp/pic/stats.gif
OLD_FILES+=usr/share/doc/ntp/pic/sx5.gif
OLD_FILES+=usr/share/doc/ntp/pic/thunderbolt.jpg
OLD_FILES+=usr/share/doc/ntp/pic/time1.gif
OLD_FILES+=usr/share/doc/ntp/pic/tonea.gif
OLD_FILES+=usr/share/doc/ntp/pic/tribeb.gif
OLD_FILES+=usr/share/doc/ntp/pic/wingdorothy.gif
OLD_FILES+=usr/share/doc/ntp/poll.html
OLD_FILES+=usr/share/doc/ntp/porting.html
OLD_FILES+=usr/share/doc/ntp/pps.html
OLD_FILES+=usr/share/doc/ntp/prefer.html
OLD_FILES+=usr/share/doc/ntp/quick.html
OLD_FILES+=usr/share/doc/ntp/rate.html
OLD_FILES+=usr/share/doc/ntp/rdebug.html
OLD_FILES+=usr/share/doc/ntp/refclock.html
OLD_FILES+=usr/share/doc/ntp/release.html
OLD_FILES+=usr/share/doc/ntp/scripts/accopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/audio.txt
OLD_FILES+=usr/share/doc/ntp/scripts/authopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/clockopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/command.txt
OLD_FILES+=usr/share/doc/ntp/scripts/config.txt
OLD_FILES+=usr/share/doc/ntp/scripts/confopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/external.txt
OLD_FILES+=usr/share/doc/ntp/scripts/footer.txt
OLD_FILES+=usr/share/doc/ntp/scripts/hand.txt
OLD_FILES+=usr/share/doc/ntp/scripts/install.txt
OLD_FILES+=usr/share/doc/ntp/scripts/manual.txt
OLD_FILES+=usr/share/doc/ntp/scripts/misc.txt
OLD_FILES+=usr/share/doc/ntp/scripts/miscopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/monopt.txt
OLD_FILES+=usr/share/doc/ntp/scripts/refclock.txt
OLD_FILES+=usr/share/doc/ntp/scripts/special.txt
OLD_FILES+=usr/share/doc/ntp/scripts/style.css
OLD_FILES+=usr/share/doc/ntp/select.html
OLD_FILES+=usr/share/doc/ntp/sitemap.html
OLD_FILES+=usr/share/doc/ntp/sntp.html
OLD_FILES+=usr/share/doc/ntp/stats.html
OLD_FILES+=usr/share/doc/ntp/tickadj.html
OLD_FILES+=usr/share/doc/ntp/warp.html
OLD_FILES+=usr/share/doc/ntp/xleave.html
OLD_DIRS+=usr/share/doc/ntp/drivers/icons
OLD_DIRS+=usr/share/doc/ntp/drivers/scripts
OLD_DIRS+=usr/share/doc/ntp/drivers
OLD_DIRS+=usr/share/doc/ntp/hints
OLD_DIRS+=usr/share/doc/ntp/icons
OLD_DIRS+=usr/share/doc/ntp/pic
OLD_DIRS+=usr/share/doc/ntp/scripts
OLD_DIRS+=usr/share/doc/ntp
OLD_FILES+=usr/share/examples/etc/ntp.conf
OLD_FILES+=usr/share/man/man1/sntp.1.gz
OLD_FILES+=usr/share/man/man5/ntp.conf.5.gz
OLD_FILES+=usr/share/man/man5/ntp.keys.5.gz
OLD_FILES+=usr/share/man/man8/ntp-keygen.8.gz
OLD_FILES+=usr/share/man/man8/ntpd.8.gz
OLD_FILES+=usr/share/man/man8/ntpdate.8.gz
OLD_FILES+=usr/share/man/man8/ntpdc.8.gz
OLD_FILES+=usr/share/man/man8/ntpq.8.gz
OLD_FILES+=usr/share/man/man8/ntptime.8.gz
.endif
.if ${MK_OPENSSH} == no
OLD_FILES+=etc/rc.d/sshd
OLD_FILES+=etc/ssh/moduli
OLD_FILES+=etc/ssh/ssh_config
OLD_FILES+=etc/ssh/sshd_config
OLD_FILES+=usr/bin/scp
OLD_FILES+=usr/bin/sftp
OLD_FILES+=usr/bin/slogin
OLD_FILES+=usr/bin/ssh
OLD_FILES+=usr/bin/ssh-add
OLD_FILES+=usr/bin/ssh-agent
OLD_FILES+=usr/bin/ssh-copy-id
OLD_FILES+=usr/bin/ssh-keygen
OLD_FILES+=usr/bin/ssh-keyscan
OLD_FILES+=usr/lib/pam_ssh.so
OLD_LIBS+=usr/lib/pam_ssh.so.6
OLD_FILES+=usr/lib/private/libssh.a
OLD_FILES+=usr/lib/private/libssh.so
OLD_LIBS+=usr/lib/private/libssh.so.5
OLD_FILES+=usr/lib/private/libssh_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
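# 64-bit targets that build the 32-bit compatibility layer also install
# lib32 copies of these libraries.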
OLD_FILES+=usr/lib32/pam_ssh.so
OLD_LIBS+=usr/lib32/pam_ssh.so.6
OLD_FILES+=usr/lib32/private/libssh.a
OLD_FILES+=usr/lib32/private/libssh.so
OLD_LIBS+=usr/lib32/private/libssh.so.5
OLD_FILES+=usr/lib32/private/libssh_p.a
.endif
OLD_FILES+=usr/libexec/sftp-server
OLD_FILES+=usr/libexec/ssh-keysign
OLD_FILES+=usr/libexec/ssh-pkcs11-helper
OLD_FILES+=usr/sbin/sshd
OLD_FILES+=usr/share/man/man1/scp.1.gz
OLD_FILES+=usr/share/man/man1/sftp.1.gz
OLD_FILES+=usr/share/man/man1/slogin.1.gz
OLD_FILES+=usr/share/man/man1/ssh-add.1.gz
OLD_FILES+=usr/share/man/man1/ssh-agent.1.gz
OLD_FILES+=usr/share/man/man1/ssh-copy-id.1.gz
OLD_FILES+=usr/share/man/man1/ssh-keygen.1.gz
OLD_FILES+=usr/share/man/man1/ssh-keyscan.1.gz
OLD_FILES+=usr/share/man/man1/ssh.1.gz
OLD_FILES+=usr/share/man/man5/ssh_config.5.gz
OLD_FILES+=usr/share/man/man5/sshd_config.5.gz
OLD_FILES+=usr/share/man/man8/pam_ssh.8.gz
OLD_FILES+=usr/share/man/man8/sftp-server.8.gz
OLD_FILES+=usr/share/man/man8/ssh-keysign.8.gz
OLD_FILES+=usr/share/man/man8/ssh-pkcs11-helper.8.gz
OLD_FILES+=usr/share/man/man8/sshd.8.gz
.endif
.if ${MK_OPENSSL} == no
OLD_FILES+=etc/rc.d/keyserv
.endif
.if ${MK_PC_SYSINSTALL} == no
# backend-partmanager
OLD_FILES+=usr/share/pc-sysinstall/backend-partmanager/create-part.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-partmanager/delete-part.sh
# backend-query
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-emulation.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-laptop.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/detect-nics.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-info.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-list.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/disk-part.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/enable-net.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/get-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-components.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-config.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-mirrors.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-rsync-backups.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/list-tzones.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/query-langs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/send-logs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/setup-ssh-keys.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/set-mirror.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/sys-mem.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/test-live.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/test-netup.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/update-part-list.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-layouts.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-models.sh
OLD_FILES+=usr/share/pc-sysinstall/backend-query/xkeyboard-variants.sh
# backend
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-bsdlabel.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-cleanup.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-disk.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-extractimage.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-ftp.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-installcomponents.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-installpackages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-localize.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-mountdisk.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-mountoptical.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-networking.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-newfs.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-parse.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-packages.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-runcommands.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-unmount.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-upgrade.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions-users.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/functions.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/installimage.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/parseconfig.sh
OLD_FILES+=usr/share/pc-sysinstall/backend/startautoinstall.sh
# conf
OLD_FILES+=usr/share/pc-sysinstall/conf/avail-langs
OLD_FILES+=usr/share/pc-sysinstall/conf/exclude-from-upgrade
OLD_FILES+=usr/share/pc-sysinstall/conf/license/bsd-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/license/intel-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/license/nvidia-en.txt
OLD_FILES+=usr/share/pc-sysinstall/conf/pc-sysinstall.conf
# doc
OLD_FILES+=usr/share/pc-sysinstall/doc/help-disk-list
OLD_FILES+=usr/share/pc-sysinstall/doc/help-disk-size
OLD_FILES+=usr/share/pc-sysinstall/doc/help-index
OLD_FILES+=usr/share/pc-sysinstall/doc/help-start-autoinstall
# examples
OLD_FILES+=usr/share/examples/pc-sysinstall/README
OLD_FILES+=usr/share/examples/pc-sysinstall/pc-autoinstall.conf
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.fbsd-netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.geli
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.gmirror
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.netinstall
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.restore
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.rsync
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.upgrade
OLD_FILES+=usr/share/examples/pc-sysinstall/pcinstall.cfg.zfs
# pc-sysinstall
OLD_FILES+=usr/sbin/pc-sysinstall
OLD_FILES+=usr/share/man/man8/pc-sysinstall.8.gz
OLD_DIRS+=usr/share/pc-sysinstall/backend
OLD_DIRS+=usr/share/pc-sysinstall/backend-partmanager
OLD_DIRS+=usr/share/pc-sysinstall/backend-query
OLD_DIRS+=usr/share/pc-sysinstall/conf/license
OLD_DIRS+=usr/share/pc-sysinstall/conf
OLD_DIRS+=usr/share/pc-sysinstall/doc
OLD_DIRS+=usr/share/pc-sysinstall
OLD_DIRS+=usr/share/examples/pc-sysinstall
.endif
.if ${MK_PF} == no
OLD_FILES+=etc/newsyslog.conf.d/pf.conf
OLD_FILES+=etc/periodic/security/520.pfdenied
OLD_FILES+=etc/pf.os
OLD_FILES+=etc/rc.d/ftp-proxy
OLD_FILES+=sbin/pfctl
OLD_FILES+=sbin/pflogd
OLD_FILES+=usr/include/netpfil/pf/pf.h
OLD_FILES+=usr/include/netpfil/pf/pf_altq.h
OLD_FILES+=usr/include/netpfil/pf/pf_mtag.h
OLD_FILES+=usr/lib/snmp_pf.so
OLD_LIBS+=usr/lib/snmp_pf.so.6
OLD_FILES+=usr/libexec/tftp-proxy
OLD_FILES+=usr/sbin/ftp-proxy
OLD_FILES+=usr/share/examples/etc/pf.os
OLD_FILES+=usr/share/examples/pf/ackpri
OLD_FILES+=usr/share/examples/pf/faq-example1
OLD_FILES+=usr/share/examples/pf/faq-example2
OLD_FILES+=usr/share/examples/pf/faq-example3
OLD_FILES+=usr/share/examples/pf/pf.conf
OLD_FILES+=usr/share/examples/pf/queue1
OLD_FILES+=usr/share/examples/pf/queue2
OLD_FILES+=usr/share/examples/pf/queue3
OLD_FILES+=usr/share/examples/pf/queue4
OLD_FILES+=usr/share/examples/pf/spamd
OLD_DIRS+=usr/share/examples/pf
OLD_FILES+=usr/share/man/man4/pf.4.gz
OLD_FILES+=usr/share/man/man4/pflog.4.gz
OLD_FILES+=usr/share/man/man4/pfsync.4.gz
OLD_FILES+=usr/share/man/man5/pf.conf.5.gz
OLD_FILES+=usr/share/man/man5/pf.os.5.gz
OLD_FILES+=usr/share/man/man8/ftp-proxy.8.gz
OLD_FILES+=usr/share/man/man8/pfctl.8.gz
OLD_FILES+=usr/share/man/man8/pflogd.8.gz
OLD_FILES+=usr/share/man/man8/tftp-proxy.8.gz
OLD_FILES+=usr/share/snmp/defs/pf_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-PF-MIB.txt
.endif
.if ${MK_PKGBOOTSTRAP} == no
OLD_FILES+=usr/sbin/pkg
OLD_FILES+=usr/share/man/man7/pkg.7.gz
.endif
.if ${MK_PMC} == no
OLD_FILES+=usr/bin/pmcstudy
OLD_FILES+=usr/include/pmc.h
OLD_FILES+=usr/include/pmclog.h
OLD_FILES+=usr/lib/libpmc.a
OLD_FILES+=usr/lib/libpmc.so
OLD_LIBS+=usr/lib/libpmc.so.5
OLD_FILES+=usr/lib/libpmc_p.a
OLD_FILES+=usr/lib32/libpmc.a
OLD_FILES+=usr/lib32/libpmc.so
OLD_LIBS+=usr/lib32/libpmc.so.5
OLD_FILES+=usr/lib32/libpmc_p.a
OLD_FILES+=usr/sbin/pmcannotate
OLD_FILES+=usr/sbin/pmccontrol
OLD_FILES+=usr/sbin/pmcstat
OLD_FILES+=usr/share/man/man1/pmcstudy.1.gz
OLD_FILES+=usr/share/man/man3/pmc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.atom.3.gz
OLD_FILES+=usr/share/man/man3/pmc.atomsilvermont.3.gz
OLD_FILES+=usr/share/man/man3/pmc.core.3.gz
OLD_FILES+=usr/share/man/man3/pmc.core2.3.gz
OLD_FILES+=usr/share/man/man3/pmc.corei7.3.gz
OLD_FILES+=usr/share/man/man3/pmc.corei7uc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.haswell.3.gz
OLD_FILES+=usr/share/man/man3/pmc.haswelluc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.iaf.3.gz
OLD_FILES+=usr/share/man/man3/pmc.ivybridge.3.gz
OLD_FILES+=usr/share/man/man3/pmc.ivybridgexeon.3.gz
OLD_FILES+=usr/share/man/man3/pmc.k7.3.gz
OLD_FILES+=usr/share/man/man3/pmc.k8.3.gz
OLD_FILES+=usr/share/man/man3/pmc.mips24k.3.gz
OLD_FILES+=usr/share/man/man3/pmc.octeon.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p4.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p5.3.gz
OLD_FILES+=usr/share/man/man3/pmc.p6.3.gz
OLD_FILES+=usr/share/man/man3/pmc.sandybridge.3.gz
OLD_FILES+=usr/share/man/man3/pmc.sandybridgeuc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.sandybridgexeon.3.gz
OLD_FILES+=usr/share/man/man3/pmc.soft.3.gz
OLD_FILES+=usr/share/man/man3/pmc.tsc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.ucf.3.gz
OLD_FILES+=usr/share/man/man3/pmc.westmere.3.gz
OLD_FILES+=usr/share/man/man3/pmc.westmereuc.3.gz
OLD_FILES+=usr/share/man/man3/pmc.xscale.3.gz
OLD_FILES+=usr/share/man/man3/pmc_allocate.3.gz
OLD_FILES+=usr/share/man/man3/pmc_attach.3.gz
OLD_FILES+=usr/share/man/man3/pmc_capabilities.3.gz
OLD_FILES+=usr/share/man/man3/pmc_configure_logfile.3.gz
OLD_FILES+=usr/share/man/man3/pmc_cpuinfo.3.gz
OLD_FILES+=usr/share/man/man3/pmc_detach.3.gz
OLD_FILES+=usr/share/man/man3/pmc_disable.3.gz
OLD_FILES+=usr/share/man/man3/pmc_enable.3.gz
OLD_FILES+=usr/share/man/man3/pmc_event_names_of_class.3.gz
OLD_FILES+=usr/share/man/man3/pmc_flush_logfile.3.gz
OLD_FILES+=usr/share/man/man3/pmc_get_driver_stats.3.gz
OLD_FILES+=usr/share/man/man3/pmc_get_msr.3.gz
OLD_FILES+=usr/share/man/man3/pmc_init.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_capability.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_class.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_cputype.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_disposition.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_event.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_mode.3.gz
OLD_FILES+=usr/share/man/man3/pmc_name_of_state.3.gz
OLD_FILES+=usr/share/man/man3/pmc_ncpu.3.gz
OLD_FILES+=usr/share/man/man3/pmc_npmc.3.gz
OLD_FILES+=usr/share/man/man3/pmc_pmcinfo.3.gz
OLD_FILES+=usr/share/man/man3/pmc_read.3.gz
OLD_FILES+=usr/share/man/man3/pmc_release.3.gz
OLD_FILES+=usr/share/man/man3/pmc_rw.3.gz
OLD_FILES+=usr/share/man/man3/pmc_set.3.gz
OLD_FILES+=usr/share/man/man3/pmc_start.3.gz
OLD_FILES+=usr/share/man/man3/pmc_stop.3.gz
OLD_FILES+=usr/share/man/man3/pmc_width.3.gz
OLD_FILES+=usr/share/man/man3/pmc_write.3.gz
OLD_FILES+=usr/share/man/man3/pmc_writelog.3.gz
OLD_FILES+=usr/share/man/man3/pmclog.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_close.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_feed.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_open.3.gz
OLD_FILES+=usr/share/man/man3/pmclog_read.3.gz
OLD_FILES+=usr/share/man/man8/pmcannotate.8.gz
OLD_FILES+=usr/share/man/man8/pmccontrol.8.gz
OLD_FILES+=usr/share/man/man8/pmcstat.8.gz
.endif
.if ${MK_PORTSNAP} == no
OLD_FILES+=etc/portsnap.conf
OLD_FILES+=usr/libexec/make_index
OLD_FILES+=usr/libexec/phttpget
OLD_FILES+=usr/sbin/portsnap
OLD_FILES+=usr/share/examples/etc/portsnap.conf
OLD_FILES+=usr/share/man/man8/phttpget.8.gz
OLD_FILES+=usr/share/man/man8/portsnap.8.gz
.endif
.if ${MK_PPP} == no
OLD_FILES+=etc/newsyslog.conf.d/ppp.conf
OLD_FILES+=etc/ppp/ppp.conf
OLD_FILES+=etc/syslog.d/ppp.conf
OLD_DIRS+=etc/ppp
OLD_FILES+=usr/sbin/ppp
OLD_FILES+=usr/sbin/pppctl
OLD_FILES+=usr/share/man/man8/ppp.8.gz
OLD_FILES+=usr/share/man/man8/pppctl.8.gz
.endif
.if ${MK_PROFILE} == no
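# The lib*_p.a archives are the profiling (-pg) builds of the base
# system libraries.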
OLD_FILES+=usr/lib/libalias_cuseeme_p.a
OLD_FILES+=usr/lib/libalias_dummy_p.a
OLD_FILES+=usr/lib/libalias_ftp_p.a
OLD_FILES+=usr/lib/libalias_irc_p.a
OLD_FILES+=usr/lib/libalias_nbt_p.a
OLD_FILES+=usr/lib/libalias_p.a
OLD_FILES+=usr/lib/libalias_pptp_p.a
OLD_FILES+=usr/lib/libalias_skinny_p.a
OLD_FILES+=usr/lib/libalias_smedia_p.a
OLD_FILES+=usr/lib/libarchive_p.a
OLD_FILES+=usr/lib/libasn1_p.a
OLD_FILES+=usr/lib/libbegemot_p.a
OLD_FILES+=usr/lib/libbluetooth_p.a
OLD_FILES+=usr/lib/libbsdxml_p.a
OLD_FILES+=usr/lib/libbsm_p.a
OLD_FILES+=usr/lib/libbsnmp_p.a
OLD_FILES+=usr/lib/libbz2_p.a
OLD_FILES+=usr/lib/libc_p.a
OLD_FILES+=usr/lib/libcalendar_p.a
OLD_FILES+=usr/lib/libcam_p.a
OLD_FILES+=usr/lib/libcom_err_p.a
OLD_FILES+=usr/lib/libcompat_p.a
OLD_FILES+=usr/lib/libcrypt_p.a
OLD_FILES+=usr/lib/libcrypto_p.a
OLD_FILES+=usr/lib/libcurses_p.a
OLD_FILES+=usr/lib/libcursesw_p.a
OLD_FILES+=usr/lib/libdevinfo_p.a
OLD_FILES+=usr/lib/libdevstat_p.a
OLD_FILES+=usr/lib/libdialog_p.a
OLD_FILES+=usr/lib/libedit_p.a
OLD_FILES+=usr/lib/libelf_p.a
OLD_FILES+=usr/lib/libfetch_p.a
OLD_FILES+=usr/lib/libfl_p.a
OLD_FILES+=usr/lib/libform_p.a
OLD_FILES+=usr/lib/libformw_p.a
OLD_FILES+=usr/lib/libgcc_p.a
OLD_FILES+=usr/lib/libgeom_p.a
OLD_FILES+=usr/lib/libgnuregex_p.a
OLD_FILES+=usr/lib/libgssapi_krb5_p.a
OLD_FILES+=usr/lib/libgssapi_p.a
OLD_FILES+=usr/lib/libhdb_p.a
OLD_FILES+=usr/lib/libheimbase_p.a
OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib/libhistory_p.a
OLD_FILES+=usr/lib/libipsec_p.a
OLD_FILES+=usr/lib/libjail_p.a
OLD_FILES+=usr/lib/libkadm5clnt_p.a
OLD_FILES+=usr/lib/libkadm5srv_p.a
OLD_FILES+=usr/lib/libkafs5_p.a
OLD_FILES+=usr/lib/libkdc_p.a
OLD_FILES+=usr/lib/libkiconv_p.a
OLD_FILES+=usr/lib/libkrb5_p.a
OLD_FILES+=usr/lib/libkvm_p.a
OLD_FILES+=usr/lib/libl_p.a
OLD_FILES+=usr/lib/libln_p.a
OLD_FILES+=usr/lib/libm_p.a
OLD_FILES+=usr/lib/libmagic_p.a
OLD_FILES+=usr/lib/libmd_p.a
OLD_FILES+=usr/lib/libmemstat_p.a
OLD_FILES+=usr/lib/libmenu_p.a
OLD_FILES+=usr/lib/libmenuw_p.a
OLD_FILES+=usr/lib/libmilter_p.a
OLD_FILES+=usr/lib/libmp_p.a
OLD_FILES+=usr/lib/libncurses_p.a
OLD_FILES+=usr/lib/libncursesw_p.a
OLD_FILES+=usr/lib/libnetgraph_p.a
OLD_FILES+=usr/lib/libngatm_p.a
OLD_FILES+=usr/lib/libopie_p.a
OLD_FILES+=usr/lib/libpanel_p.a
OLD_FILES+=usr/lib/libpanelw_p.a
OLD_FILES+=usr/lib/libpcap_p.a
OLD_FILES+=usr/lib/libpmc_p.a
OLD_FILES+=usr/lib/libpthread_p.a
OLD_FILES+=usr/lib/libradius_p.a
OLD_FILES+=usr/lib/libroken_p.a
OLD_FILES+=usr/lib/librpcsvc_p.a
OLD_FILES+=usr/lib/librt_p.a
OLD_FILES+=usr/lib/libsbuf_p.a
OLD_FILES+=usr/lib/libsdp_p.a
OLD_FILES+=usr/lib/libsmb_p.a
OLD_FILES+=usr/lib/libssl_p.a
OLD_FILES+=usr/lib/libstdc++_p.a
OLD_FILES+=usr/lib/libsupc++_p.a
OLD_FILES+=usr/lib/libtacplus_p.a
OLD_FILES+=usr/lib/libtermcap_p.a
OLD_FILES+=usr/lib/libtermcapw_p.a
OLD_FILES+=usr/lib/libtermlib_p.a
OLD_FILES+=usr/lib/libtermlibw_p.a
OLD_FILES+=usr/lib/libthr_p.a
OLD_FILES+=usr/lib/libthread_db_p.a
OLD_FILES+=usr/lib/libtinfo_p.a
OLD_FILES+=usr/lib/libtinfow_p.a
OLD_FILES+=usr/lib/libufs_p.a
OLD_FILES+=usr/lib/libugidfw_p.a
OLD_FILES+=usr/lib/libusbhid_p.a
OLD_FILES+=usr/lib/libutil_p.a
OLD_FILES+=usr/lib/libvgl_p.a
OLD_FILES+=usr/lib/libwind_p.a
OLD_FILES+=usr/lib/libwrap_p.a
OLD_FILES+=usr/lib/liby_p.a
OLD_FILES+=usr/lib/libypclnt_p.a
OLD_FILES+=usr/lib/libz_p.a
OLD_FILES+=usr/lib/private/libldns_p.a
OLD_FILES+=usr/lib/private/libssh_p.a
.endif
.if ${MK_QUOTAS} == no
OLD_FILES+=sbin/quotacheck
OLD_FILES+=usr/bin/quota
OLD_FILES+=usr/sbin/edquota
OLD_FILES+=usr/sbin/quotaoff
OLD_FILES+=usr/sbin/quotaon
OLD_FILES+=usr/sbin/repquota
OLD_FILES+=usr/share/man/man1/quota.1.gz
OLD_FILES+=usr/share/man/man8/edquota.8.gz
OLD_FILES+=usr/share/man/man8/quotacheck.8.gz
OLD_FILES+=usr/share/man/man8/quotaoff.8.gz
OLD_FILES+=usr/share/man/man8/quotaon.8.gz
OLD_FILES+=usr/share/man/man8/repquota.8.gz
.endif
.if ${MK_RBOOTD} == no
OLD_FILES+=usr/libexec/rbootd
OLD_FILES+=usr/share/man/man8/rbootd.8.gz
.endif
.if ${MK_RESCUE} == no
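# Unlike the static lists above, the rescue set is enumerated at parse
# time: the "!=" assignments run find(1) over ${DESTDIR}/rescue and pipe
# through sed(1) to strip the ${DESTDIR} prefix, yielding the same
# relative paths (e.g. "rescue/routed") the hand-maintained lists use.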
. if exists(${DESTDIR}/rescue)
RESCUE_DIRS!=find ${DESTDIR}/rescue -type d 2>/dev/null | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${RESCUE_DIRS}
RESCUE_FILES!=find ${DESTDIR}/rescue \! -type d 2>/dev/null | sed -e 's,^${DESTDIR}/,,'; echo
OLD_FILES+=${RESCUE_FILES}
. endif
.endif
.if ${MK_ROUTED} == no
OLD_FILES+=rescue/routed
OLD_FILES+=rescue/rtquery
OLD_FILES+=sbin/routed
OLD_FILES+=sbin/rtquery
OLD_FILES+=usr/share/man/man8/routed.8.gz
OLD_FILES+=usr/share/man/man8/rtquery.8.gz
.endif
.if ${MK_SENDMAIL} == no
OLD_FILES+=etc/newsyslog.conf.d/sendmail.conf
OLD_FILES+=etc/periodic/daily/150.clean-hoststat
OLD_FILES+=etc/periodic/daily/440.status-mailq
OLD_FILES+=etc/periodic/daily/460.status-mail-rejects
OLD_FILES+=etc/periodic/daily/500.queuerun
OLD_FILES+=bin/rmail
OLD_FILES+=usr/bin/vacation
OLD_FILES+=usr/include/libmilter/mfapi.h
OLD_FILES+=usr/include/libmilter/mfdef.h
OLD_DIRS+=usr/include/libmilter
OLD_FILES+=usr/lib/libmilter.a
OLD_FILES+=usr/lib/libmilter.so
OLD_LIBS+=usr/lib/libmilter.so.5
OLD_FILES+=usr/lib/libmilter_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/libmilter.a
OLD_FILES+=usr/lib32/libmilter.so
OLD_LIBS+=usr/lib32/libmilter.so.5
OLD_FILES+=usr/lib32/libmilter_p.a
.endif
OLD_FILES+=usr/libexec/mail.local
OLD_FILES+=usr/libexec/sendmail/sendmail
OLD_FILES+=usr/libexec/smrsh
OLD_FILES+=usr/sbin/editmap
OLD_FILES+=usr/sbin/mailstats
OLD_FILES+=usr/sbin/makemap
OLD_FILES+=usr/sbin/praliases
OLD_FILES+=usr/share/doc/smm/08.sendmailop/paper.ascii.gz
OLD_DIRS+=usr/share/doc/smm/08.sendmailop
OLD_FILES+=usr/share/man/man1/mailq.1.gz
OLD_FILES+=usr/share/man/man1/newaliases.1.gz
OLD_FILES+=usr/share/man/man1/vacation.1.gz
OLD_FILES+=usr/share/man/man5/aliases.5.gz
OLD_FILES+=usr/share/man/man8/editmap.8.gz
OLD_FILES+=usr/share/man/man8/hoststat.8.gz
OLD_FILES+=usr/share/man/man8/mail.local.8.gz
OLD_FILES+=usr/share/man/man8/mailstats.8.gz
OLD_FILES+=usr/share/man/man8/makemap.8.gz
OLD_FILES+=usr/share/man/man8/praliases.8.gz
OLD_FILES+=usr/share/man/man8/purgestat.8.gz
OLD_FILES+=usr/share/man/man8/rmail.8.gz
OLD_FILES+=usr/share/man/man8/sendmail.8.gz
OLD_FILES+=usr/share/man/man8/smrsh.8.gz
OLD_FILES+=usr/share/sendmail/cf/README
OLD_FILES+=usr/share/sendmail/cf/cf/Makefile
OLD_FILES+=usr/share/sendmail/cf/cf/README
OLD_FILES+=usr/share/sendmail/cf/cf/chez.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/clientproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-hpux10.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-hpux9.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-osf1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-solaris2.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-sunos4.1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cs-ultrix4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/cyrusproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-bsd4.4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-hpux10.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-hpux9.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-linux.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-mpeix.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-nextstep3.3.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-osf1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-solaris.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-sunos4.1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/generic-ultrix4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/huginn.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/knecht.mc
OLD_FILES+=usr/share/sendmail/cf/cf/mail.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/mail.eecs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/mailspool.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/python.cs.mc
OLD_FILES+=usr/share/sendmail/cf/cf/s2k-osf1.mc
OLD_FILES+=usr/share/sendmail/cf/cf/s2k-ultrix4.mc
OLD_FILES+=usr/share/sendmail/cf/cf/submit.cf
OLD_FILES+=usr/share/sendmail/cf/cf/submit.mc
OLD_FILES+=usr/share/sendmail/cf/cf/tcpproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/ucbarpa.mc
OLD_FILES+=usr/share/sendmail/cf/cf/ucbvax.mc
OLD_FILES+=usr/share/sendmail/cf/cf/uucpproto.mc
OLD_FILES+=usr/share/sendmail/cf/cf/vangogh.cs.mc
OLD_DIRS+=usr/share/sendmail/cf/cf
OLD_FILES+=usr/share/sendmail/cf/domain/Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/CS.Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/EECS.Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/S2K.Berkeley.EDU.m4
OLD_FILES+=usr/share/sendmail/cf/domain/berkeley-only.m4
OLD_FILES+=usr/share/sendmail/cf/domain/generic.m4
OLD_DIRS+=usr/share/sendmail/cf/domain
OLD_FILES+=usr/share/sendmail/cf/feature/accept_unqualified_senders.m4
OLD_FILES+=usr/share/sendmail/cf/feature/accept_unresolvable_domains.m4
OLD_FILES+=usr/share/sendmail/cf/feature/access_db.m4
OLD_FILES+=usr/share/sendmail/cf/feature/allmasquerade.m4
OLD_FILES+=usr/share/sendmail/cf/feature/always_add_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/authinfo.m4
OLD_FILES+=usr/share/sendmail/cf/feature/badmx.m4
OLD_FILES+=usr/share/sendmail/cf/feature/bcc.m4
OLD_FILES+=usr/share/sendmail/cf/feature/bestmx_is_local.m4
OLD_FILES+=usr/share/sendmail/cf/feature/bitdomain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/blacklist_recipients.m4
OLD_FILES+=usr/share/sendmail/cf/feature/block_bad_helo.m4
OLD_FILES+=usr/share/sendmail/cf/feature/compat_check.m4
OLD_FILES+=usr/share/sendmail/cf/feature/conncontrol.m4
OLD_FILES+=usr/share/sendmail/cf/feature/delay_checks.m4
OLD_FILES+=usr/share/sendmail/cf/feature/dnsbl.m4
OLD_FILES+=usr/share/sendmail/cf/feature/domaintable.m4
OLD_FILES+=usr/share/sendmail/cf/feature/enhdnsbl.m4
OLD_FILES+=usr/share/sendmail/cf/feature/generics_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/genericstable.m4
OLD_FILES+=usr/share/sendmail/cf/feature/greet_pause.m4
OLD_FILES+=usr/share/sendmail/cf/feature/ldap_routing.m4
OLD_FILES+=usr/share/sendmail/cf/feature/limited_masquerade.m4
OLD_FILES+=usr/share/sendmail/cf/feature/local_lmtp.m4
OLD_FILES+=usr/share/sendmail/cf/feature/local_no_masquerade.m4
OLD_FILES+=usr/share/sendmail/cf/feature/local_procmail.m4
OLD_FILES+=usr/share/sendmail/cf/feature/lookupdotdomain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/loose_relay_check.m4
OLD_FILES+=usr/share/sendmail/cf/feature/mailertable.m4
OLD_FILES+=usr/share/sendmail/cf/feature/masquerade_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/masquerade_envelope.m4
OLD_FILES+=usr/share/sendmail/cf/feature/msp.m4
OLD_FILES+=usr/share/sendmail/cf/feature/mtamark.m4
OLD_FILES+=usr/share/sendmail/cf/feature/no_default_msa.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nocanonify.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nopercenthack.m4
OLD_FILES+=usr/share/sendmail/cf/feature/notsticky.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nouucp.m4
OLD_FILES+=usr/share/sendmail/cf/feature/nullclient.m4
OLD_FILES+=usr/share/sendmail/cf/feature/prefixmod.m4
OLD_FILES+=usr/share/sendmail/cf/feature/preserve_local_plus_detail.m4
OLD_FILES+=usr/share/sendmail/cf/feature/preserve_luser_host.m4
OLD_FILES+=usr/share/sendmail/cf/feature/promiscuous_relay.m4
OLD_FILES+=usr/share/sendmail/cf/feature/queuegroup.m4
OLD_FILES+=usr/share/sendmail/cf/feature/ratecontrol.m4
OLD_FILES+=usr/share/sendmail/cf/feature/redirect.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_based_on_MX.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_hosts_only.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_local_from.m4
OLD_FILES+=usr/share/sendmail/cf/feature/relay_mail_from.m4
OLD_FILES+=usr/share/sendmail/cf/feature/require_rdns.m4
OLD_FILES+=usr/share/sendmail/cf/feature/smrsh.m4
OLD_FILES+=usr/share/sendmail/cf/feature/stickyhost.m4
OLD_FILES+=usr/share/sendmail/cf/feature/tls_session_features.m4
OLD_FILES+=usr/share/sendmail/cf/feature/use_client_ptr.m4
OLD_FILES+=usr/share/sendmail/cf/feature/use_ct_file.m4
OLD_FILES+=usr/share/sendmail/cf/feature/use_cw_file.m4
OLD_FILES+=usr/share/sendmail/cf/feature/uucpdomain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/virtuser_entire_domain.m4
OLD_FILES+=usr/share/sendmail/cf/feature/virtusertable.m4
OLD_DIRS+=usr/share/sendmail/cf/feature
OLD_FILES+=usr/share/sendmail/cf/hack/cssubdomain.m4
OLD_FILES+=usr/share/sendmail/cf/hack/xconnect.m4
OLD_DIRS+=usr/share/sendmail/cf/hack
OLD_FILES+=usr/share/sendmail/cf/m4/cf.m4
OLD_FILES+=usr/share/sendmail/cf/m4/cfhead.m4
OLD_FILES+=usr/share/sendmail/cf/m4/proto.m4
OLD_FILES+=usr/share/sendmail/cf/m4/version.m4
OLD_DIRS+=usr/share/sendmail/cf/m4
OLD_FILES+=usr/share/sendmail/cf/mailer/cyrus.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/cyrusv2.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/fax.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/local.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/mail11.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/phquery.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/pop.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/procmail.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/qpage.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/smtp.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/usenet.m4
OLD_FILES+=usr/share/sendmail/cf/mailer/uucp.m4
OLD_DIRS+=usr/share/sendmail/cf/mailer
OLD_FILES+=usr/share/sendmail/cf/ostype/a-ux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/aix3.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/aix4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/aix5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/altos.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/amdahl-uts.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsd4.3.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsd4.4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi1.0.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/bsdi2.0.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/darwin.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/dgux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/domainos.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/dragonfly.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/dynix3.2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/freebsd6.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/gnu.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/hpux10.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/hpux11.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/hpux9.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/irix4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/irix5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/irix6.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/isc4.1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/linux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/maxion.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/mklinux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/mpeix.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/nextstep.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/openbsd.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/osf1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/powerux.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/ptx2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/qnx.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/riscos4.5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sco-uw-2.1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sco3.2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sinix.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris11.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.ml.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris2.pre5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/solaris8.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sunos3.5.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/sunos4.1.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/svr4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/ultrix4.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unicos.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unicosmk.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unicosmp.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unixware7.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/unknown.m4
OLD_FILES+=usr/share/sendmail/cf/ostype/uxpds.m4
OLD_DIRS+=usr/share/sendmail/cf/ostype
OLD_FILES+=usr/share/sendmail/cf/sendmail.schema
OLD_FILES+=usr/share/sendmail/cf/sh/makeinfo.sh
OLD_DIRS+=usr/share/sendmail/cf/sh
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.cogsci.m4
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.old.arpa.m4
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.ucbarpa.m4
OLD_FILES+=usr/share/sendmail/cf/siteconfig/uucp.ucbvax.m4
OLD_DIRS+=usr/share/sendmail/cf/siteconfig
OLD_DIRS+=usr/share/sendmail/cf
OLD_DIRS+=usr/share/sendmail
.endif
.if ${MK_SERVICESDB} == no
OLD_FILES+=var/db/services.db
.endif
.if ${MK_SHAREDOCS} == no
OLD_FILES+=usr/share/doc/pjdfstest/README
OLD_DIRS+=usr/share/doc/pjdfstest
.endif
.if ${MK_SSP} == no
OLD_LIBS+=lib/libssp.so.0
OLD_FILES+=usr/include/ssp/ssp.h
OLD_FILES+=usr/include/ssp/stdio.h
OLD_FILES+=usr/include/ssp/string.h
OLD_FILES+=usr/include/ssp/unistd.h
OLD_FILES+=usr/lib/libssp.a
OLD_FILES+=usr/lib/libssp.so
OLD_FILES+=usr/lib/libssp_nonshared.a
OLD_FILES+=usr/lib32/libssp.a
OLD_FILES+=usr/lib32/libssp.so
OLD_LIBS+=usr/lib32/libssp.so.0
OLD_FILES+=usr/lib32/libssp_nonshared.a
OLD_FILES+=usr/tests/lib/libc/ssp/Kyuafile
OLD_FILES+=usr/tests/lib/libc/ssp/h_fgets
OLD_FILES+=usr/tests/lib/libc/ssp/h_getcwd
OLD_FILES+=usr/tests/lib/libc/ssp/h_gets
OLD_FILES+=usr/tests/lib/libc/ssp/h_memcpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_memmove
OLD_FILES+=usr/tests/lib/libc/ssp/h_memset
OLD_FILES+=usr/tests/lib/libc/ssp/h_read
OLD_FILES+=usr/tests/lib/libc/ssp/h_readlink
OLD_FILES+=usr/tests/lib/libc/ssp/h_snprintf
OLD_FILES+=usr/tests/lib/libc/ssp/h_sprintf
OLD_FILES+=usr/tests/lib/libc/ssp/h_stpcpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_stpncpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_strcat
OLD_FILES+=usr/tests/lib/libc/ssp/h_strcpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_strncat
OLD_FILES+=usr/tests/lib/libc/ssp/h_strncpy
OLD_FILES+=usr/tests/lib/libc/ssp/h_vsnprintf
OLD_FILES+=usr/tests/lib/libc/ssp/h_vsprintf
OLD_FILES+=usr/tests/lib/libc/ssp/ssp_test
.endif
.if ${MK_SYSCONS} == no
OLD_FILES+=usr/share/syscons/fonts/INDEX.fonts
OLD_FILES+=usr/share/syscons/fonts/armscii8-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/armscii8-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/armscii8-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp1251-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp1251-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp1251-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp437-thin-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp850-thin-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp865-thin-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866b-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866c-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866u-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866u-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/cp866u-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/haik8-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/haik8-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/haik8-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso02-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso02-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso02-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-vga9-wide-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso04-wide-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso05-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso05-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso05-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso07-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso07-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso07-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso08-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso08-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso08-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso09-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/iso15-thin-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-r-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-r-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-r-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-rb-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-rc-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-u-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-u-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/koi8-u-8x8.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-1131-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-1251-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-8x14.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-8x16.fnt
OLD_FILES+=usr/share/syscons/fonts/swiss-8x8.fnt
OLD_FILES+=usr/share/syscons/keymaps/INDEX.keymaps
OLD_FILES+=usr/share/syscons/keymaps/be.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/be.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/bg.bds.ctrlcaps.kbd
OLD_FILES+=usr/share/syscons/keymaps/bg.phonetic.ctrlcaps.kbd
OLD_FILES+=usr/share/syscons/keymaps/br275.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/br275.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/br275.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/by.cp1131.kbd
OLD_FILES+=usr/share/syscons/keymaps/by.cp1251.kbd
OLD_FILES+=usr/share/syscons/keymaps/by.iso5.kbd
OLD_FILES+=usr/share/syscons/keymaps/ce.iso2.kbd
OLD_FILES+=usr/share/syscons/keymaps/colemak.iso15.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/cs.latin2.qwertz.kbd
OLD_FILES+=usr/share/syscons/keymaps/cz.iso2.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.cp865.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/danish.iso.macbook.kbd
OLD_FILES+=usr/share/syscons/keymaps/dutch.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/eee_nordic.kbd
OLD_FILES+=usr/share/syscons/keymaps/el.iso07.kbd
OLD_FILES+=usr/share/syscons/keymaps/estonian.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/estonian.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/estonian.iso15.kbd
OLD_FILES+=usr/share/syscons/keymaps/finnish.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/finnish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.dvorak.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr.macbook.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/fr_CA.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/german.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/german.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/german.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/gr.elot.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/gr.us101.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/hr.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/hu.iso2.101keys.kbd
OLD_FILES+=usr/share/syscons/keymaps/hu.iso2.102keys.kbd
OLD_FILES+=usr/share/syscons/keymaps/hy.armscii-8.kbd
OLD_FILES+=usr/share/syscons/keymaps/icelandic.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/icelandic.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/it.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/iw.iso8.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.106.kbd
OLD_FILES+=usr/share/syscons/keymaps/jp.106x.kbd
OLD_FILES+=usr/share/syscons/keymaps/kk.pt154.io.kbd
OLD_FILES+=usr/share/syscons/keymaps/kk.pt154.kst.kbd
OLD_FILES+=usr/share/syscons/keymaps/latinamerican.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/latinamerican.kbd
OLD_FILES+=usr/share/syscons/keymaps/lt.iso4.kbd
OLD_FILES+=usr/share/syscons/keymaps/norwegian.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/norwegian.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/pl_PL.ISO8859-2.kbd
OLD_FILES+=usr/share/syscons/keymaps/pl_PL.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/pt.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/pt.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.cp866.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.iso5.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.koi8-r.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.koi8-r.shift.kbd
OLD_FILES+=usr/share/syscons/keymaps/ru.koi8-r.win.kbd
OLD_FILES+=usr/share/syscons/keymaps/si.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/sk.iso2.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/spanish.iso15.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/swedish.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/swedish.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissfrench.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissfrench.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissfrench.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/swissgerman.macbook.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/tr.iso9.q.kbd
OLD_FILES+=usr/share/syscons/keymaps/ua.iso5.kbd
OLD_FILES+=usr/share/syscons/keymaps/ua.koi8-u.kbd
OLD_FILES+=usr/share/syscons/keymaps/ua.koi8-u.shift.alt.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.cp850-ctrl.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.cp850.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.iso-ctrl.kbd
OLD_FILES+=usr/share/syscons/keymaps/uk.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorak.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakl.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakp.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakr.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.dvorakx.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.emacs.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.iso.acc.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.iso.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.pc-ctrl.kbd
OLD_FILES+=usr/share/syscons/keymaps/us.unix.kbd
OLD_FILES+=usr/share/syscons/scrnmaps/armscii8-2haik8.scm
OLD_FILES+=usr/share/syscons/scrnmaps/iso-8859-1_to_cp437.scm
OLD_FILES+=usr/share/syscons/scrnmaps/iso-8859-4_for_vga9.scm
OLD_FILES+=usr/share/syscons/scrnmaps/iso-8859-7_to_cp437.scm
OLD_FILES+=usr/share/syscons/scrnmaps/koi8-r2cp866.scm
OLD_FILES+=usr/share/syscons/scrnmaps/koi8-u2cp866u.scm
OLD_FILES+=usr/share/syscons/scrnmaps/us-ascii_to_cp437.scm
OLD_DIRS+=usr/share/syscons/fonts
OLD_DIRS+=usr/share/syscons/scrnmaps
OLD_DIRS+=usr/share/syscons/keymaps
OLD_DIRS+=usr/share/syscons
.endif
.if ${MK_TALK} == no
OLD_FILES+=usr/bin/talk
OLD_FILES+=usr/libexec/ntalkd
OLD_FILES+=usr/share/man/man1/talk.1.gz
OLD_FILES+=usr/share/man/man8/talkd.8.gz
.endif
.if ${MK_TCSH} == no
OLD_FILES+=.cshrc
OLD_FILES+=etc/csh.cshrc
OLD_FILES+=etc/csh.login
OLD_FILES+=etc/csh.logout
OLD_FILES+=bin/csh
OLD_FILES+=bin/tcsh
OLD_FILES+=rescue/csh
OLD_FILES+=rescue/tcsh
OLD_FILES+=root/.cshrc
OLD_FILES+=root/.login
OLD_FILES+=usr/share/examples/etc/csh.cshrc
OLD_FILES+=usr/share/examples/etc/csh.login
OLD_FILES+=usr/share/examples/etc/csh.logout
OLD_FILES+=usr/share/examples/tcsh/complete.tcsh
OLD_FILES+=usr/share/examples/tcsh/csh-mode.el
OLD_DIRS+=usr/share/examples/tcsh
OLD_FILES+=usr/share/man/man1/csh.1.gz
OLD_FILES+=usr/share/man/man1/tcsh.1.gz
OLD_FILES+=usr/share/nls/de_AT.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/de_AT.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/de_AT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/de_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/de_DE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/el_GR.ISO8859-7/tcsh.cat
OLD_FILES+=usr/share/nls/el_GR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/es_ES.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/et_EE.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/et_EE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fi_FI.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_BE.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CA.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/fr_FR.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/it_CH.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.ISO8859-1/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.ISO8859-15/tcsh.cat
OLD_FILES+=usr/share/nls/it_IT.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.SJIS/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/ja_JP.eucJP/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP1251/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.CP866/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.KOI8-R/tcsh.cat
OLD_FILES+=usr/share/nls/ru_RU.UTF-8/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.ISO8859-5/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.KOI8-U/tcsh.cat
OLD_FILES+=usr/share/nls/uk_UA.UTF-8/tcsh.cat
.endif
.if ${MK_TELNET} == no
OLD_FILES+=etc/pam.d/telnetd
OLD_FILES+=usr/bin/telnet
OLD_FILES+=usr/libexec/telnetd
OLD_FILES+=usr/share/man/man1/telnet.1.gz
OLD_FILES+=usr/share/man/man8/telnetd.8.gz
.endif
.if ${MK_TESTS} == yes
OLD_FILES+=usr/bin/atf-sh
OLD_FILES+=usr/include/atf-c++/config.hpp
OLD_FILES+=usr/include/atf-c/config.h
OLD_FILES+=usr/lib/libatf-c++.a
OLD_FILES+=usr/lib/libatf-c++.so
OLD_LIBS+=usr/lib/libatf-c++.so.1
OLD_LIBS+=usr/lib/libatf-c++.so.2
OLD_FILES+=usr/lib/libatf-c++_p.a
OLD_FILES+=usr/lib/libatf-c.a
OLD_FILES+=usr/lib/libatf-c.so
OLD_LIBS+=usr/lib/libatf-c.so.1
OLD_FILES+=usr/lib/libatf-c_p.a
OLD_LIBS+=usr/lib/private/libatf-c.so.0
OLD_LIBS+=usr/lib/private/libatf-c++.so.1
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
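# 32-bit compatibility libraries (usr/lib32) are only built on these
# 64-bit targets, so their obsolete entries are guarded accordingly.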
OLD_FILES+=usr/lib32/libatf-c++.a
OLD_FILES+=usr/lib32/libatf-c++.so
OLD_LIBS+=usr/lib32/libatf-c++.so.1
OLD_LIBS+=usr/lib32/libatf-c++.so.2
OLD_FILES+=usr/lib32/libatf-c++_p.a
OLD_FILES+=usr/lib32/libatf-c.a
OLD_FILES+=usr/lib32/libatf-c.so
OLD_LIBS+=usr/lib32/libatf-c.so.1
OLD_FILES+=usr/lib32/libatf-c_p.a
OLD_LIBS+=usr/lib32/private/libatf-c.so.0
OLD_LIBS+=usr/lib32/private/libatf-c++.so.1
.endif
OLD_FILES+=usr/libdata/pkgconfig/atf-c++.pc
OLD_FILES+=usr/libdata/pkgconfig/atf-c.pc
OLD_FILES+=usr/libdata/pkgconfig/atf-sh.pc
OLD_FILES+=usr/share/aclocal/atf-c++.m4
OLD_FILES+=usr/share/aclocal/atf-c.m4
OLD_FILES+=usr/share/aclocal/atf-common.m4
OLD_FILES+=usr/share/aclocal/atf-sh.m4
OLD_DIRS+=usr/share/aclocal
OLD_DIRS+=usr/tests/bin/chown
OLD_FILES+=usr/tests/bin/chown/Kyuafile
OLD_FILES+=usr/tests/bin/chown/chown-f_test
OLD_FILES+=usr/tests/bin/chown/units_basics
OLD_FILES+=usr/tests/bin/date/legacy_test
OLD_FILES+=usr/tests/bin/sh/legacy_test
OLD_FILES+=usr/tests/usr.bin/atf/Kyuafile
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/Kyuafile
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/atf_check_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/config_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/integration_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/misc_helpers
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/normalize_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/tc_test
OLD_FILES+=usr/tests/usr.bin/atf/atf-sh/tp_test
OLD_DIRS+=usr/tests/usr.bin/atf/atf-sh
OLD_DIRS+=usr/tests/usr.bin/atf
OLD_FILES+=usr/tests/lib/atf/libatf-c/test_helpers_test
OLD_FILES+=usr/tests/lib/atf/test-programs/fork_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/application_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/expand_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/parser_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/ui_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/exceptions_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/expand_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/parser_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/pkg_config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/ui_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/dynstr_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/list_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/map_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/pkg_config_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/process_helpers
OLD_FILES+=usr/tests/lib/atf/libatf-c/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/user_test
.if ${MK_MAKE} == yes
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.status.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod/libtest.a
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.status.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stderr.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.6
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/expected.stdout.7
OLD_FILES+=usr/tests/usr.bin/make/archives/fmt_oldbsd/libtest.a
OLD_FILES+=usr/tests/usr.bin/make/archives/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/basic/t3/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/ellipsis/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/empty/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/joberr/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/execution/plus/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/execution/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/builtin/sh
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/meta/sh
OLD_FILES+=usr/tests/usr.bin/make/shell/path/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/path/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/path/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path/sh
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/path_select/shell
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/replace/shell
OLD_FILES+=usr/tests/usr.bin/make/shell/select/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/shell/select/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/shell/select/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/shell/select/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/shell/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/TEST1.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/basic/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/TEST1.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/TEST2.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/TEST1.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/TEST2.a
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/src_wild2/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/suffixes/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/directive-t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.4
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.status.5
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.4
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stderr.5
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.4
OLD_FILES+=usr/tests/usr.bin/make/syntax/enl/expected.stdout.5
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/funny-targets/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/syntax/semi/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/syntax/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/cleanup
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/1/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/mk/sys.mk
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/mk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/t2/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/sysmk/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_M/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.status.3
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stderr.3
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/variables/modifier_t/expected.stdout.3
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.status.2
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stderr.2
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/opt_V/expected.stdout.2
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/legacy_test
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/Makefile.test
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/expected.status.1
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/expected.stderr.1
OLD_FILES+=usr/tests/usr.bin/make/variables/t0/expected.stdout.1
OLD_FILES+=usr/tests/usr.bin/make/variables/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/Kyuafile
OLD_FILES+=usr/tests/usr.bin/make/common.sh
OLD_FILES+=usr/tests/usr.bin/make/test-new.mk
OLD_DIRS+=usr/tests/usr.bin/make/variables/t0
OLD_DIRS+=usr/tests/usr.bin/make/variables/opt_V
OLD_DIRS+=usr/tests/usr.bin/make/variables/modifier_t
OLD_DIRS+=usr/tests/usr.bin/make/variables/modifier_M
OLD_DIRS+=usr/tests/usr.bin/make/variables
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2/mk
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2/2/1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2/2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1/mk
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1/2/1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1/2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0/mk
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0/2/1
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0/2
OLD_DIRS+=usr/tests/usr.bin/make/sysmk/t0
OLD_DIRS+=usr/tests/usr.bin/make/sysmk
OLD_DIRS+=usr/tests/usr.bin/make/syntax/semi
OLD_DIRS+=usr/tests/usr.bin/make/syntax/funny-targets
OLD_DIRS+=usr/tests/usr.bin/make/syntax/enl
OLD_DIRS+=usr/tests/usr.bin/make/syntax/directive-t0
OLD_DIRS+=usr/tests/usr.bin/make/syntax
OLD_DIRS+=usr/tests/usr.bin/make/suffixes/src_wild2
OLD_DIRS+=usr/tests/usr.bin/make/suffixes/src_wild1
OLD_DIRS+=usr/tests/usr.bin/make/suffixes/basic
OLD_DIRS+=usr/tests/usr.bin/make/suffixes
OLD_DIRS+=usr/tests/usr.bin/make/shell/select
OLD_DIRS+=usr/tests/usr.bin/make/shell/replace
OLD_DIRS+=usr/tests/usr.bin/make/shell/path_select
OLD_DIRS+=usr/tests/usr.bin/make/shell/path
OLD_DIRS+=usr/tests/usr.bin/make/shell/meta
OLD_DIRS+=usr/tests/usr.bin/make/shell/builtin
OLD_DIRS+=usr/tests/usr.bin/make/shell
OLD_DIRS+=usr/tests/usr.bin/make/execution/plus
OLD_DIRS+=usr/tests/usr.bin/make/execution/joberr
OLD_DIRS+=usr/tests/usr.bin/make/execution/empty
OLD_DIRS+=usr/tests/usr.bin/make/execution/ellipsis
OLD_DIRS+=usr/tests/usr.bin/make/execution
OLD_DIRS+=usr/tests/usr.bin/make/basic/t3
OLD_DIRS+=usr/tests/usr.bin/make/basic/t2
OLD_DIRS+=usr/tests/usr.bin/make/basic/t1
OLD_DIRS+=usr/tests/usr.bin/make/basic/t0
OLD_DIRS+=usr/tests/usr.bin/make/basic
OLD_DIRS+=usr/tests/usr.bin/make/archives/fmt_oldbsd
OLD_DIRS+=usr/tests/usr.bin/make/archives/fmt_44bsd_mod
OLD_DIRS+=usr/tests/usr.bin/make/archives/fmt_44bsd
OLD_DIRS+=usr/tests/usr.bin/make/archives
OLD_DIRS+=usr/tests/usr.bin/make
OLD_FILES+=usr/tests/usr.bin/yacc/legacy_test
OLD_FILES+=usr/tests/usr.bin/yacc/regress.00.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.01.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.02.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.03.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.04.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.05.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.06.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.07.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.08.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.09.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.10.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.11.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.12.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.13.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.14.out
OLD_FILES+=usr/tests/usr.bin/yacc/regress.sh
OLD_FILES+=usr/tests/usr.bin/yacc/undefined.y
.endif
.else
# ATF libraries.
OLD_FILES+=etc/mtree/BSD.tests.dist
OLD_FILES+=usr/bin/atf-sh
OLD_DIRS+=usr/include/atf-c
OLD_FILES+=usr/include/atf-c/build.h
OLD_FILES+=usr/include/atf-c/check.h
OLD_FILES+=usr/include/atf-c/config.h
OLD_FILES+=usr/include/atf-c/defs.h
OLD_FILES+=usr/include/atf-c/error.h
OLD_FILES+=usr/include/atf-c/error_fwd.h
OLD_FILES+=usr/include/atf-c/macros.h
OLD_FILES+=usr/include/atf-c/tc.h
OLD_FILES+=usr/include/atf-c/tp.h
OLD_FILES+=usr/include/atf-c/utils.h
OLD_FILES+=usr/include/atf-c.h
OLD_DIRS+=usr/include/atf-c++
OLD_FILES+=usr/include/atf-c++/build.hpp
OLD_FILES+=usr/include/atf-c++/check.hpp
OLD_FILES+=usr/include/atf-c++/config.hpp
OLD_FILES+=usr/include/atf-c++/macros.hpp
OLD_FILES+=usr/include/atf-c++/tests.hpp
OLD_FILES+=usr/include/atf-c++/utils.hpp
OLD_FILES+=usr/include/atf-c++.hpp
OLD_FILES+=usr/lib/libatf-c_p.a
OLD_FILES+=usr/lib/libatf-c.so.1
OLD_FILES+=usr/lib/libatf-c.so
OLD_FILES+=usr/lib/libatf-c++.a
OLD_FILES+=usr/lib/libatf-c++_p.a
OLD_FILES+=usr/lib/libatf-c++.so.1
OLD_FILES+=usr/lib/libatf-c++.so
OLD_FILES+=usr/lib/libatf-c.a
OLD_FILES+=usr/libexec/atf-check
OLD_FILES+=usr/libexec/atf-sh
OLD_DIRS+=usr/share/atf
OLD_FILES+=usr/share/atf/libatf-sh.subr
OLD_DIRS+=usr/share/doc/atf
OLD_FILES+=usr/share/doc/atf/AUTHORS
OLD_FILES+=usr/share/doc/atf/COPYING
OLD_FILES+=usr/share/doc/atf/NEWS
OLD_FILES+=usr/share/doc/atf/README
OLD_FILES+=usr/share/doc/pjdfstest/README
OLD_FILES+=usr/share/man/man1/atf-check.1.gz
OLD_FILES+=usr/share/man/man1/atf-sh.1.gz
OLD_FILES+=usr/share/man/man1/atf-test-program.1.gz
OLD_FILES+=usr/share/man/man3/atf-c-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-c++-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-sh-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-sh.3.gz
OLD_FILES+=usr/share/man/man4/atf-test-case.4.gz
OLD_FILES+=usr/share/man/man7/atf.7.gz
OLD_FILES+=usr/share/mk/atf.test.mk
OLD_FILES+=usr/share/mk/plain.test.mk
OLD_FILES+=usr/share/mk/suite.test.mk
OLD_FILES+=usr/share/mk/tap.test.mk
# Test suite.
. if exists(${DESTDIR}${TESTSBASE})
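# Enumerate everything currently installed under ${DESTDIR}${TESTSBASE};
# the trailing "echo" keeps the != shell assignment exiting successfully
# even when find produces no output.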
TESTS_DIRS!=find ${DESTDIR}${TESTSBASE} -type d | sed -e 's,^${DESTDIR}/,,'; echo
OLD_DIRS+=${TESTS_DIRS}
TESTS_FILES!=find ${DESTDIR}${TESTSBASE} \! -type d | sed -e 's,^${DESTDIR}/,,'; echo
OLD_FILES+=${TESTS_FILES}
. endif
.endif # Test suite.
.if ${MK_TESTS_SUPPORT} == no
OLD_FILES+=usr/include/atf-c++.hpp
OLD_FILES+=usr/include/atf-c++/build.hpp
OLD_FILES+=usr/include/atf-c++/check.hpp
OLD_FILES+=usr/include/atf-c++/macros.hpp
OLD_FILES+=usr/include/atf-c++/tests.hpp
OLD_FILES+=usr/include/atf-c++/utils.hpp
OLD_FILES+=usr/include/atf-c.h
OLD_FILES+=usr/include/atf-c/build.h
OLD_FILES+=usr/include/atf-c/check.h
OLD_FILES+=usr/include/atf-c/defs.h
OLD_FILES+=usr/include/atf-c/error.h
OLD_FILES+=usr/include/atf-c/error_fwd.h
OLD_FILES+=usr/include/atf-c/macros.h
OLD_FILES+=usr/include/atf-c/tc.h
OLD_FILES+=usr/include/atf-c/tp.h
OLD_FILES+=usr/include/atf-c/utils.h
OLD_LIBS+=usr/lib/private/libatf-c++.so.2
OLD_LIBS+=usr/lib/private/libatf-c.so.1
OLD_FILES+=usr/share/man/man3/atf-c++.3.gz
OLD_FILES+=usr/share/man/man3/atf-c-api++.3.gz
OLD_FILES+=usr/share/man/man3/atf-c-api.3.gz
OLD_FILES+=usr/share/man/man3/atf-c.3.gz
OLD_FILES+=usr/tests/lib/atf/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c++/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c++/atf_c++_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/build_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/check_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/application_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/exceptions_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/detail/version_helper
OLD_FILES+=usr/tests/lib/atf/libatf-c++/macros_hpp_test.cpp
OLD_FILES+=usr/tests/lib/atf/libatf-c++/macros_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/tests_test
OLD_FILES+=usr/tests/lib/atf/libatf-c++/unused_test.cpp
OLD_FILES+=usr/tests/lib/atf/libatf-c++/utils_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c/atf_c_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/build_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/check_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/Kyuafile
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/dynstr_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/env_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/fs_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/list_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/map_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/process_helpers
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/process_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/sanity_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/text_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/user_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/detail/version_helper
OLD_FILES+=usr/tests/lib/atf/libatf-c/error_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/macros_h_test.c
OLD_FILES+=usr/tests/lib/atf/libatf-c/macros_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/tc_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/tp_test
OLD_FILES+=usr/tests/lib/atf/libatf-c/unused_test.c
OLD_FILES+=usr/tests/lib/atf/libatf-c/utils_test
OLD_FILES+=usr/tests/lib/atf/test-programs/Kyuafile
OLD_FILES+=usr/tests/lib/atf/test-programs/c_helpers
OLD_FILES+=usr/tests/lib/atf/test-programs/config_test
OLD_FILES+=usr/tests/lib/atf/test-programs/cpp_helpers
OLD_FILES+=usr/tests/lib/atf/test-programs/expect_test
OLD_FILES+=usr/tests/lib/atf/test-programs/meta_data_test
OLD_FILES+=usr/tests/lib/atf/test-programs/result_test
OLD_FILES+=usr/tests/lib/atf/test-programs/sh_helpers
OLD_FILES+=usr/tests/lib/atf/test-programs/srcdir_test
.endif
.if ${MK_TEXTPROC} == no
OLD_FILES+=usr/bin/checknr
OLD_FILES+=usr/bin/colcrt
OLD_FILES+=usr/bin/ul
OLD_FILES+=usr/share/man/man1/checknr.1.gz
OLD_FILES+=usr/share/man/man1/colcrt.1.gz
OLD_FILES+=usr/share/man/man1/ul.1.gz
.endif
.if ${MK_TFTP} == no
OLD_FILES+=usr/bin/tftp
OLD_FILES+=usr/libexec/tftpd
OLD_FILES+=usr/share/man/man1/tftp.1.gz
OLD_FILES+=usr/share/man/man8/tftpd.8.gz
.endif
.if ${MK_TIMED} == no
OLD_FILES+=usr/sbin/timed
OLD_FILES+=usr/sbin/timedc
OLD_FILES+=usr/share/man/man8/timed.8.gz
OLD_FILES+=usr/share/man/man8/timedc.8.gz
.endif
.if ${MK_TOOLCHAIN} == no
OLD_FILES+=usr/bin/addr2line
OLD_FILES+=usr/bin/as
OLD_FILES+=usr/bin/byacc
OLD_FILES+=usr/bin/cc
OLD_FILES+=usr/bin/c89
OLD_FILES+=usr/bin/c++
OLD_FILES+=usr/bin/c++filt
OLD_FILES+=usr/bin/ld
OLD_FILES+=usr/bin/ld.bfd
OLD_FILES+=usr/bin/nm
OLD_FILES+=usr/bin/objcopy
OLD_FILES+=usr/bin/readelf
OLD_FILES+=usr/bin/size
OLD_FILES+=usr/bin/strip
OLD_FILES+=usr/bin/yacc
OLD_FILES+=usr/share/man/man1/addr2line.1.gz
OLD_FILES+=usr/share/man/man1/c++filt.1.gz
OLD_FILES+=usr/share/man/man1/nm.1.gz
OLD_FILES+=usr/share/man/man1/readelf.1.gz
OLD_FILES+=usr/share/man/man1/size.1.gz
OLD_FILES+=usr/share/man/man1/strip.1.gz
OLD_FILES+=usr/share/man/man1/objcopy.1.gz
# lib/libelf
OLD_FILES+=usr/share/man/man3/elf.3.gz
OLD_FILES+=usr/share/man/man3/elf_begin.3.gz
OLD_FILES+=usr/share/man/man3/elf_cntl.3.gz
OLD_FILES+=usr/share/man/man3/elf_end.3.gz
OLD_FILES+=usr/share/man/man3/elf_errmsg.3.gz
OLD_FILES+=usr/share/man/man3/elf_fill.3.gz
OLD_FILES+=usr/share/man/man3/elf_flagdata.3.gz
OLD_FILES+=usr/share/man/man3/elf_getarhdr.3.gz
OLD_FILES+=usr/share/man/man3/elf_getarsym.3.gz
OLD_FILES+=usr/share/man/man3/elf_getbase.3.gz
OLD_FILES+=usr/share/man/man3/elf_getdata.3.gz
OLD_FILES+=usr/share/man/man3/elf_getident.3.gz
OLD_FILES+=usr/share/man/man3/elf_getscn.3.gz
OLD_FILES+=usr/share/man/man3/elf_getphdrnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getphnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshdrnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshnum.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshdrstrndx.3.gz
OLD_FILES+=usr/share/man/man3/elf_getshstrndx.3.gz
OLD_FILES+=usr/share/man/man3/elf_hash.3.gz
OLD_FILES+=usr/share/man/man3/elf_kind.3.gz
OLD_FILES+=usr/share/man/man3/elf_memory.3.gz
OLD_FILES+=usr/share/man/man3/elf_next.3.gz
OLD_FILES+=usr/share/man/man3/elf_open.3.gz
OLD_FILES+=usr/share/man/man3/elf_rawfile.3.gz
OLD_FILES+=usr/share/man/man3/elf_rand.3.gz
OLD_FILES+=usr/share/man/man3/elf_strptr.3.gz
OLD_FILES+=usr/share/man/man3/elf_update.3.gz
OLD_FILES+=usr/share/man/man3/elf_version.3.gz
OLD_FILES+=usr/share/man/man3/gelf.3.gz
OLD_FILES+=usr/share/man/man3/gelf_checksum.3.gz
OLD_FILES+=usr/share/man/man3/gelf_fsize.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getcap.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getclass.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getdyn.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getehdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getmove.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getphdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getrel.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getrela.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getshdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getsym.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getsyminfo.3.gz
OLD_FILES+=usr/share/man/man3/gelf_getsymshndx.3.gz
OLD_FILES+=usr/share/man/man3/gelf_newehdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_newphdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_update_ehdr.3.gz
OLD_FILES+=usr/share/man/man3/gelf_xlatetof.3.gz
# lib/libelftc
OLD_FILES+=usr/share/man/man3/elftc.3.gz
OLD_FILES+=usr/share/man/man3/elftc_bfd_find_target.3.gz
OLD_FILES+=usr/share/man/man3/elftc_copyfile.3.gz
OLD_FILES+=usr/share/man/man3/elftc_demangle.3.gz
OLD_FILES+=usr/share/man/man3/elftc_reloc_type_str.3.gz
OLD_FILES+=usr/share/man/man3/elftc_set_timestamps.3.gz
OLD_FILES+=usr/share/man/man3/elftc_timestamp.3.gz
OLD_FILES+=usr/share/man/man3/elftc_string_table_create.3.gz
OLD_FILES+=usr/share/man/man3/elftc_version.3.gz
OLD_FILES+=usr/tests/usr.bin/yacc/Kyuafile
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_calc1.y
OLD_FILES+=usr/tests/usr.bin/yacc/btyacc_demo.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc1.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc2.y
OLD_FILES+=usr/tests/usr.bin/yacc/calc3.y
OLD_FILES+=usr/tests/usr.bin/yacc/code_calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/code_debug.y
OLD_FILES+=usr/tests/usr.bin/yacc/code_error.y
OLD_FILES+=usr/tests/usr.bin/yacc/empty.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit1.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit2.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit3.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit4.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_inherit5.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax1.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax10.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax11.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax12.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax13.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax14.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax15.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax16.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax17.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax18.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax19.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax2.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax20.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax21.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax22.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax23.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax24.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax25.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax26.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax27.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax3.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax4.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax5.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax6.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax7.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax7a.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax7b.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax8.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax8a.y
OLD_FILES+=usr/tests/usr.bin/yacc/err_syntax9.y
OLD_FILES+=usr/tests/usr.bin/yacc/error.y
OLD_FILES+=usr/tests/usr.bin/yacc/grammar.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit0.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit1.y
OLD_FILES+=usr/tests/usr.bin/yacc/inherit2.y
OLD_FILES+=usr/tests/usr.bin/yacc/ok_syntax1.y
OLD_FILES+=usr/tests/usr.bin/yacc/pure_calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/pure_error.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc2.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc3.y
OLD_FILES+=usr/tests/usr.bin/yacc/quote_calc4.y
OLD_FILES+=usr/tests/usr.bin/yacc/run_test
OLD_FILES+=usr/tests/usr.bin/yacc/varsyntax_calc1.y
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_b.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_b.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_l.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/big_l.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc2.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/calc3.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.code.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.code.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/code_error.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/empty.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax10.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax11.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax12.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax13.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax14.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax15.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax16.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax17.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax18.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax19.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax2.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax20.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax21.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax22.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax23.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax24.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax25.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax26.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax27.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax3.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax4.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax5.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax6.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7a.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax7b.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax8a.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/err_syntax9.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/error.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.dot
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/grammar.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/help.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/help.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_b_opt1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_code_c.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_code_c.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_defines.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_defines.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_graph.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_graph.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_include.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_include.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_opts.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_opts.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_output2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_p_opt1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_verbose.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/no_verbose.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/nostdin.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/nostdin.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/ok_syntax1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/pure_error.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc2.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc3.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4-s.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/quote_calc4.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.i
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/rename_debug.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.error
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.output
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.tab.c
OLD_FILES+=usr/tests/usr.bin/yacc/yacc/varsyntax_calc1.tab.h
OLD_FILES+=usr/tests/usr.bin/yacc/yacc_tests
OLD_DIRS+=usr/tests/usr.bin/yacc
.endif
.if ${MK_UNBOUND} == no
OLD_FILES+=etc/rc.d/local_unbound
OLD_FILES+=etc/unbound
OLD_FILES+=usr/lib/private/libunbound.a
OLD_FILES+=usr/lib/private/libunbound.so
OLD_LIBS+=usr/lib/private/libunbound.so.5
OLD_FILES+=usr/lib/private/libunbound_p.a
.if ${TARGET_ARCH} == "amd64" || ${TARGET_ARCH} == "powerpc64"
OLD_FILES+=usr/lib32/private/libunbound.a
OLD_FILES+=usr/lib32/private/libunbound.so
OLD_LIBS+=usr/lib32/private/libunbound.so.5
OLD_FILES+=usr/lib32/private/libunbound_p.a
.endif
OLD_FILES+=usr/share/man/man5/local-unbound.conf.5.gz
OLD_FILES+=usr/share/man/man8/local-unbound-anchor.8.gz
OLD_FILES+=usr/share/man/man8/local-unbound-checkconf.8.gz
OLD_FILES+=usr/share/man/man8/local-unbound-control.8.gz
OLD_FILES+=usr/share/man/man8/local-unbound.8.gz
OLD_FILES+=usr/sbin/local-unbound-setup
OLD_FILES+=usr/sbin/local-unbound
OLD_FILES+=usr/sbin/local-unbound-anchor
OLD_FILES+=usr/sbin/local-unbound-checkconf
OLD_FILES+=usr/sbin/local-unbound-control
.endif
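# Illustrative sketch (not part of the build logic): the MK_* values tested
# by the blocks above are derived from the WITH_*/WITHOUT_* knobs in
# src.conf(5). Assuming a stock source tree, the effective value of a knob
# can be inspected with the standard showconfig target, e.g.:
# cd /usr/src && make showconfig | grep MK_UNBOUND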
.if ${MK_USB} == no
OLD_FILES+=etc/devd/uath.conf
OLD_FILES+=etc/devd/uauth.conf
OLD_FILES+=etc/devd/ulpt.conf
OLD_FILES+=etc/devd/usb.conf
OLD_FILES+=usr/bin/usbhidaction
OLD_FILES+=usr/bin/usbhidctl
OLD_FILES+=usr/include/libusb.h
OLD_FILES+=usr/include/libusb20.h
OLD_FILES+=usr/include/libusb20_desc.h
OLD_FILES+=usr/include/usb.h
OLD_FILES+=usr/include/usbhid.h
OLD_FILES+=usr/lib/libusb.a
OLD_FILES+=usr/lib/libusb.so
OLD_LIBS+=usr/lib/libusb.so.3
OLD_FILES+=usr/lib/libusb_p.a
OLD_FILES+=usr/lib/libusbhid.a
OLD_FILES+=usr/lib/libusbhid.so
OLD_LIBS+=usr/lib/libusbhid.so.4
OLD_FILES+=usr/lib/libusbhid_p.a
OLD_FILES+=usr/lib32/libusb.a
OLD_FILES+=usr/lib32/libusb.so
OLD_LIBS+=usr/lib32/libusb.so.3
OLD_FILES+=usr/lib32/libusb_p.a
OLD_FILES+=usr/lib32/libusbhid.a
OLD_FILES+=usr/lib32/libusbhid.so
OLD_LIBS+=usr/lib32/libusbhid.so.4
OLD_FILES+=usr/lib32/libusbhid_p.a
OLD_FILES+=usr/libdata/pkgconfig/libusb-0.1.pc
OLD_FILES+=usr/libdata/pkgconfig/libusb-1.0.pc
OLD_FILES+=usr/libdata/pkgconfig/libusb-2.0.pc
OLD_FILES+=usr/sbin/uathload
OLD_FILES+=usr/sbin/uhsoctl
OLD_FILES+=usr/sbin/usbconfig
OLD_FILES+=usr/sbin/usbdump
OLD_FILES+=usr/share/examples/libusb20/Makefile
OLD_FILES+=usr/share/examples/libusb20/README
OLD_FILES+=usr/share/examples/libusb20/bulk.c
OLD_FILES+=usr/share/examples/libusb20/control.c
OLD_FILES+=usr/share/examples/libusb20/util.c
OLD_FILES+=usr/share/examples/libusb20/util.h
OLD_DIRS+=usr/share/examples/libusb20
OLD_FILES+=usr/share/firmware/ar5523.bin
OLD_FILES+=usr/share/man/man1/uhsoctl.1.gz
OLD_FILES+=usr/share/man/man1/usbhidaction.1.gz
OLD_FILES+=usr/share/man/man1/usbhidctl.1.gz
OLD_FILES+=usr/share/man/man3/hid_dispose_report_desc.3.gz
OLD_FILES+=usr/share/man/man3/hid_end_parse.3.gz
OLD_FILES+=usr/share/man/man3/hid_get_data.3.gz
OLD_FILES+=usr/share/man/man3/hid_get_item.3.gz
OLD_FILES+=usr/share/man/man3/hid_get_report_desc.3.gz
OLD_FILES+=usr/share/man/man3/hid_init.3.gz
OLD_FILES+=usr/share/man/man3/hid_locate.3.gz
OLD_FILES+=usr/share/man/man3/hid_report_size.3.gz
OLD_FILES+=usr/share/man/man3/hid_set_data.3.gz
OLD_FILES+=usr/share/man/man3/hid_start_parse.3.gz
OLD_FILES+=usr/share/man/man3/hid_usage_in_page.3.gz
OLD_FILES+=usr/share/man/man3/hid_usage_page.3.gz
OLD_FILES+=usr/share/man/man3/libusb.3.gz
OLD_FILES+=usr/share/man/man3/libusb20.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_add_dev_quirk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_alloc_default.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_dequeue_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_device_foreach.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_enqueue_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_free.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_get_dev_quirk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_get_quirk_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_get_template.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_remove_dev_quirk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_be_set_template.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_desc_foreach.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_alloc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_alloc_config.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_check_connected.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_close.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_detach_kernel_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_free.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_address.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_backend_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_bus_number.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_config_index.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_debug.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_desc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_device_desc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_fd.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_iface_desc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_info.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_mode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_parent_address.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_parent_port.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_port_path.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_power_mode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_power_usage.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_get_speed.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_kernel_driver_active.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_open.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_process.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_req_string_simple_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_req_string_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_request_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_reset.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_alt_index.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_config_index.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_debug.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_set_power_mode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_dev_wait_process.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_error_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_decode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_encode.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_get_1.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_me_get_2.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_strerror.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_bulk_intr_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_callback_wrapper.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_clear_stall_sync.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_close.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_drain.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_actual_frames.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_actual_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_max_frames.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_max_packet_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_max_total_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_pointer.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_priv_sc0.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_priv_sc1.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_status.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_get_time_complete.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_open.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_pending.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_buffer.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_callback.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_flags.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_length.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_priv_sc0.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_priv_sc1.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_timeout.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_set_total_frames.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_bulk.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_control.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_intr.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_setup_isoc.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_start.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_stop.3.gz
OLD_FILES+=usr/share/man/man3/libusb20_tr_submit.3.gz
OLD_FILES+=usr/share/man/man3/libusb_alloc_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_attach_kernel_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb_bulk_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_cancel_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_check_connected.3.gz
OLD_FILES+=usr/share/man/man3/libusb_claim_interface.3.gz
OLD_FILES+=usr/share/man/man3/libusb_clear_halt.3.gz
OLD_FILES+=usr/share/man/man3/libusb_close.3.gz
OLD_FILES+=usr/share/man/man3/libusb_control_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_detach_kernel_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb_detach_kernel_driver_np.3.gz
OLD_FILES+=usr/share/man/man3/libusb_error_name.3.gz
OLD_FILES+=usr/share/man/man3/libusb_event_handler_active.3.gz
OLD_FILES+=usr/share/man/man3/libusb_event_handling_ok.3.gz
OLD_FILES+=usr/share/man/man3/libusb_exit.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_bos_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_config_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_device_list.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_ss_endpoint_comp.3.gz
OLD_FILES+=usr/share/man/man3/libusb_free_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_active_config_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_bus_number.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_config_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_config_descriptor_by_value.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_configuration.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_address.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_list.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_device_speed.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_driver.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_driver_np.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_max_iso_packet_size.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_max_packet_size.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_next_timeout.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_pollfds.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_string_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_get_string_descriptor_ascii.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_completed.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_locked.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_timeout.3.gz
OLD_FILES+=usr/share/man/man3/libusb_handle_events_timeout_completed.3.gz
OLD_FILES+=usr/share/man/man3/libusb_init.3.gz
OLD_FILES+=usr/share/man/man3/libusb_interrupt_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_kernel_driver_active.3.gz
OLD_FILES+=usr/share/man/man3/libusb_lock_event_waiters.3.gz
OLD_FILES+=usr/share/man/man3/libusb_lock_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_open.3.gz
OLD_FILES+=usr/share/man/man3/libusb_open_device_with_vid_pid.3.gz
OLD_FILES+=usr/share/man/man3/libusb_parse_bos_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/libusb_parse_ss_endpoint_comp.3.gz
OLD_FILES+=usr/share/man/man3/libusb_ref_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_release_interface.3.gz
OLD_FILES+=usr/share/man/man3/libusb_reset_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_configuration.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_debug.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_interface_alt_setting.3.gz
OLD_FILES+=usr/share/man/man3/libusb_set_pollfd_notifiers.3.gz
OLD_FILES+=usr/share/man/man3/libusb_strerror.3.gz
OLD_FILES+=usr/share/man/man3/libusb_submit_transfer.3.gz
OLD_FILES+=usr/share/man/man3/libusb_try_lock_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_unlock_event_waiters.3.gz
OLD_FILES+=usr/share/man/man3/libusb_unlock_events.3.gz
OLD_FILES+=usr/share/man/man3/libusb_unref_device.3.gz
OLD_FILES+=usr/share/man/man3/libusb_wait_for_event.3.gz
OLD_FILES+=usr/share/man/man3/libusbhid.3.gz
OLD_FILES+=usr/share/man/man3/usb.3.gz
OLD_FILES+=usr/share/man/man3/usb_bulk_read.3.gz
OLD_FILES+=usr/share/man/man3/usb_bulk_write.3.gz
OLD_FILES+=usr/share/man/man3/usb_check_connected.3.gz
OLD_FILES+=usr/share/man/man3/usb_claim_interface.3.gz
OLD_FILES+=usr/share/man/man3/usb_clear_halt.3.gz
OLD_FILES+=usr/share/man/man3/usb_close.3.gz
OLD_FILES+=usr/share/man/man3/usb_control_msg.3.gz
OLD_FILES+=usr/share/man/man3/usb_destroy_configuration.3.gz
OLD_FILES+=usr/share/man/man3/usb_device.3.gz
OLD_FILES+=usr/share/man/man3/usb_fetch_and_parse_descriptors.3.gz
OLD_FILES+=usr/share/man/man3/usb_find_busses.3.gz
OLD_FILES+=usr/share/man/man3/usb_find_devices.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_busses.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_descriptor_by_endpoint.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_string.3.gz
OLD_FILES+=usr/share/man/man3/usb_get_string_simple.3.gz
OLD_FILES+=usr/share/man/man3/usb_init.3.gz
OLD_FILES+=usr/share/man/man3/usb_interrupt_read.3.gz
OLD_FILES+=usr/share/man/man3/usb_interrupt_write.3.gz
OLD_FILES+=usr/share/man/man3/usb_open.3.gz
OLD_FILES+=usr/share/man/man3/usb_parse_configuration.3.gz
OLD_FILES+=usr/share/man/man3/usb_parse_descriptor.3.gz
OLD_FILES+=usr/share/man/man3/usb_release_interface.3.gz
OLD_FILES+=usr/share/man/man3/usb_reset.3.gz
OLD_FILES+=usr/share/man/man3/usb_resetep.3.gz
OLD_FILES+=usr/share/man/man3/usb_set_altinterface.3.gz
OLD_FILES+=usr/share/man/man3/usb_set_configuration.3.gz
OLD_FILES+=usr/share/man/man3/usb_set_debug.3.gz
OLD_FILES+=usr/share/man/man3/usb_strerror.3.gz
OLD_FILES+=usr/share/man/man3/usbhid.3.gz
OLD_FILES+=usr/share/man/man4/if_otus.4.gz
OLD_FILES+=usr/share/man/man4/if_rsu.4.gz
OLD_FILES+=usr/share/man/man4/if_rtwn_usb.4.gz
OLD_FILES+=usr/share/man/man4/if_rum.4.gz
OLD_FILES+=usr/share/man/man4/if_run.4.gz
OLD_FILES+=usr/share/man/man4/if_zyd.4.gz
OLD_FILES+=usr/share/man/man4/otus.4.gz
OLD_FILES+=usr/share/man/man4/otusfw.4.gz
OLD_FILES+=usr/share/man/man4/rsu.4.gz
OLD_FILES+=usr/share/man/man4/rsufw.4.gz
OLD_FILES+=usr/share/man/man4/rtwn_usb.4.gz
OLD_FILES+=usr/share/man/man4/rum.4.gz
OLD_FILES+=usr/share/man/man4/run.4.gz
OLD_FILES+=usr/share/man/man4/runfw.4.gz
OLD_FILES+=usr/share/man/man4/u3g.4.gz
OLD_FILES+=usr/share/man/man4/u3gstub.4.gz
OLD_FILES+=usr/share/man/man4/uark.4.gz
OLD_FILES+=usr/share/man/man4/uart.4.gz
OLD_FILES+=usr/share/man/man4/uath.4.gz
OLD_FILES+=usr/share/man/man4/ubsa.4.gz
OLD_FILES+=usr/share/man/man4/ubsec.4.gz
OLD_FILES+=usr/share/man/man4/ubser.4.gz
OLD_FILES+=usr/share/man/man4/ubtbcmfw.4.gz
OLD_FILES+=usr/share/man/man4/uchcom.4.gz
OLD_FILES+=usr/share/man/man4/ucom.4.gz
OLD_FILES+=usr/share/man/man4/ucycom.4.gz
OLD_FILES+=usr/share/man/man4/udav.4.gz
OLD_FILES+=usr/share/man/man4/udbp.4.gz
OLD_FILES+=usr/share/man/man4/uep.4.gz
OLD_FILES+=usr/share/man/man4/ufm.4.gz
OLD_FILES+=usr/share/man/man4/ufoma.4.gz
OLD_FILES+=usr/share/man/man4/uftdi.4.gz
OLD_FILES+=usr/share/man/man4/ugen.4.gz
OLD_FILES+=usr/share/man/man4/uhci.4.gz
OLD_FILES+=usr/share/man/man4/uhid.4.gz
OLD_FILES+=usr/share/man/man4/uhso.4.gz
OLD_FILES+=usr/share/man/man4/uipaq.4.gz
OLD_FILES+=usr/share/man/man4/ukbd.4.gz
OLD_FILES+=usr/share/man/man4/uled.4.gz
OLD_FILES+=usr/share/man/man4/ulpt.4.gz
OLD_FILES+=usr/share/man/man4/umass.4.gz
OLD_FILES+=usr/share/man/man4/umcs.4.gz
OLD_FILES+=usr/share/man/man4/umct.4.gz
OLD_FILES+=usr/share/man/man4/umodem.4.gz
OLD_FILES+=usr/share/man/man4/umoscom.4.gz
OLD_FILES+=usr/share/man/man4/ums.4.gz
OLD_FILES+=usr/share/man/man4/unix.4.gz
OLD_FILES+=usr/share/man/man4/upgt.4.gz
OLD_FILES+=usr/share/man/man4/uplcom.4.gz
OLD_FILES+=usr/share/man/man4/ural.4.gz
OLD_FILES+=usr/share/man/man4/urio.4.gz
OLD_FILES+=usr/share/man/man4/urndis.4.gz
OLD_FILES+=usr/share/man/man4/urtw.4.gz
OLD_FILES+=usr/share/man/man4/usb.4.gz
OLD_FILES+=usr/share/man/man4/usb_quirk.4.gz
OLD_FILES+=usr/share/man/man4/usb_template.4.gz
OLD_FILES+=usr/share/man/man4/usfs.4.gz
OLD_FILES+=usr/share/man/man4/uslcom.4.gz
OLD_FILES+=usr/share/man/man4/uvisor.4.gz
OLD_FILES+=usr/share/man/man4/uvscom.4.gz
OLD_FILES+=usr/share/man/man4/zyd.4.gz
OLD_FILES+=usr/share/man/man8/uathload.8.gz
OLD_FILES+=usr/share/man/man8/usbconfig.8.gz
OLD_FILES+=usr/share/man/man8/usbdump.8.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_alloc_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_attach.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_detach.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_free_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data_error.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_get_data_linear.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_bytes_max.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data_buffer.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data_error.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_put_data_linear.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_reset.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_softc.9.gz
OLD_FILES+=usr/share/man/man9/usb_fifo_wakeup.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request.9.gz
OLD_FILES+=usr/share/man/man9/usbd_do_request_flags.9.gz
OLD_FILES+=usr/share/man/man9/usbd_errstr.9.gz
OLD_FILES+=usr/share/man/man9/usbd_lookup_id_by_info.9.gz
OLD_FILES+=usr/share/man/man9/usbd_lookup_id_by_uaa.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_clear_stall.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_drain.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_pending.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_poll.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_setup.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_start.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_stop.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_submit.9.gz
OLD_FILES+=usr/share/man/man9/usbd_transfer_unsetup.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_clr_flag.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_frame_data.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_frame_len.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_get_frame.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_get_priv.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_is_stalled.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_max_framelen.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_max_frames.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_max_len.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_flag.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frame_data.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frame_len.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frame_offset.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_frames.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_interval.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_priv.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_stall.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_set_timeout.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_softc.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_state.9.gz
OLD_FILES+=usr/share/man/man9/usbd_xfer_status.9.gz
OLD_FILES+=usr/share/man/man9/usbdi.9.gz
OLD_FILES+=usr/share/misc/usb_hid_usages
OLD_FILES+=usr/share/misc/usbdevs
.endif
.if ${MK_UTMPX} == no
OLD_FILES+=etc/periodic/monthly/200.accounting
OLD_FILES+=usr/bin/last
OLD_FILES+=usr/bin/users
OLD_FILES+=usr/bin/who
OLD_FILES+=usr/sbin/ac
OLD_FILES+=usr/sbin/lastlogin
OLD_FILES+=usr/sbin/utx
OLD_FILES+=usr/share/man/man1/last.1.gz
OLD_FILES+=usr/share/man/man1/users.1.gz
OLD_FILES+=usr/share/man/man1/who.1.gz
OLD_FILES+=usr/share/man/man8/ac.8.gz
OLD_FILES+=usr/share/man/man8/lastlogin.8.gz
OLD_FILES+=usr/share/man/man8/utx.8.gz
.endif
.if ${MK_WIRELESS} == no
OLD_FILES+=etc/regdomain.xml
OLD_FILES+=etc/rc.d/hostapd
OLD_FILES+=etc/rc.d/wpa_supplicant
OLD_FILES+=usr/sbin/ancontrol
OLD_FILES+=usr/sbin/hostapd
OLD_FILES+=usr/sbin/hostapd_cli
OLD_FILES+=usr/sbin/ndis_events
OLD_FILES+=usr/sbin/wlandebug
OLD_FILES+=usr/sbin/wpa_cli
OLD_FILES+=usr/sbin/wpa_passphrase
OLD_FILES+=usr/sbin/wpa_supplicant
OLD_FILES+=usr/share/examples/etc/regdomain.xml
OLD_FILES+=usr/share/examples/etc/wpa_supplicant.conf
OLD_FILES+=usr/share/examples/hostapd/hostapd.conf
OLD_FILES+=usr/share/examples/hostapd/hostapd.eap_user
OLD_FILES+=usr/share/examples/hostapd/hostapd.wpa_psk
OLD_DIRS+=usr/share/examples/hostapd
OLD_FILES+=usr/share/man/man5/hostapd.conf.5.gz
OLD_FILES+=usr/share/man/man5/wpa_supplicant.conf.5.gz
OLD_FILES+=usr/share/man/man8/ancontrol.8.gz
OLD_FILES+=usr/share/man/man8/hostapd.8.gz
OLD_FILES+=usr/share/man/man8/hostapd_cli.8.gz
OLD_FILES+=usr/share/man/man8/ndis_events.8.gz
OLD_FILES+=usr/share/man/man8/wlandebug.8.gz
OLD_FILES+=usr/share/man/man8/wpa_cli.8.gz
OLD_FILES+=usr/share/man/man8/wpa_passphrase.8.gz
OLD_FILES+=usr/share/man/man8/wpa_supplicant.8.gz
# bsnmp module
OLD_FILES+=usr/lib/snmp_wlan.so
OLD_LIBS+=usr/lib/snmp_wlan.so.6
OLD_FILES+=usr/share/man/man3/snmp_wlan.3.gz
OLD_FILES+=usr/share/snmp/defs/wlan_tree.def
OLD_FILES+=usr/share/snmp/mibs/BEGEMOT-WIRELESS-MIB.txt
.endif
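# Note: the next block fires both when svnlite is disabled outright and when
# the full SVN suite is built (WITH_SVN), which supersedes the svnlite
# front ends.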
.if ${MK_SVNLITE} == no || ${MK_SVN} == yes
OLD_FILES+=usr/bin/svnlite
OLD_FILES+=usr/bin/svnliteadmin
OLD_FILES+=usr/bin/svnlitebench
OLD_FILES+=usr/bin/svnlitedumpfilter
OLD_FILES+=usr/bin/svnlitefsfs
OLD_FILES+=usr/bin/svnlitelook
OLD_FILES+=usr/bin/svnlitemucc
OLD_FILES+=usr/bin/svnliterdump
OLD_FILES+=usr/bin/svnliteserve
OLD_FILES+=usr/bin/svnlitesync
OLD_FILES+=usr/bin/svnliteversion
OLD_FILES+=usr/share/man/man1/svnlite.1.gz
.endif
.if ${MK_SVN} == no
OLD_FILES+=usr/bin/svn
OLD_FILES+=usr/bin/svnadmin
OLD_FILES+=usr/bin/svnbench
OLD_FILES+=usr/bin/svndumpfilter
OLD_FILES+=usr/bin/svnfsfs
OLD_FILES+=usr/bin/svnlook
OLD_FILES+=usr/bin/svnmucc
OLD_FILES+=usr/bin/svnrdump
OLD_FILES+=usr/bin/svnserve
OLD_FILES+=usr/bin/svnsync
OLD_FILES+=usr/bin/svnversion
.endif
.if ${MK_HYPERV} == no
OLD_FILES+=etc/devd/hyperv.conf
OLD_FILES+=usr/libexec/hyperv/hv_set_ifconfig
OLD_FILES+=usr/libexec/hyperv/hv_get_dns_info
OLD_FILES+=usr/libexec/hyperv/hv_get_dhcp_info
OLD_FILES+=usr/sbin/hv_kvp_daemon
OLD_FILES+=usr/sbin/hv_vss_daemon
OLD_FILES+=usr/share/man/man8/hv_kvp_daemon.8.gz
.endif
.if ${MK_ZONEINFO} == no
OLD_FILES+=usr/share/zoneinfo/Africa/Abidjan
OLD_FILES+=usr/share/zoneinfo/Africa/Accra
OLD_FILES+=usr/share/zoneinfo/Africa/Addis_Ababa
OLD_FILES+=usr/share/zoneinfo/Africa/Algiers
OLD_FILES+=usr/share/zoneinfo/Africa/Asmara
OLD_FILES+=usr/share/zoneinfo/Africa/Bamako
OLD_FILES+=usr/share/zoneinfo/Africa/Bangui
OLD_FILES+=usr/share/zoneinfo/Africa/Banjul
OLD_FILES+=usr/share/zoneinfo/Africa/Bissau
OLD_FILES+=usr/share/zoneinfo/Africa/Blantyre
OLD_FILES+=usr/share/zoneinfo/Africa/Brazzaville
OLD_FILES+=usr/share/zoneinfo/Africa/Bujumbura
OLD_FILES+=usr/share/zoneinfo/Africa/Cairo
OLD_FILES+=usr/share/zoneinfo/Africa/Casablanca
OLD_FILES+=usr/share/zoneinfo/Africa/Ceuta
OLD_FILES+=usr/share/zoneinfo/Africa/Conakry
OLD_FILES+=usr/share/zoneinfo/Africa/Dakar
OLD_FILES+=usr/share/zoneinfo/Africa/Dar_es_Salaam
OLD_FILES+=usr/share/zoneinfo/Africa/Djibouti
OLD_FILES+=usr/share/zoneinfo/Africa/Douala
OLD_FILES+=usr/share/zoneinfo/Africa/El_Aaiun
OLD_FILES+=usr/share/zoneinfo/Africa/Freetown
OLD_FILES+=usr/share/zoneinfo/Africa/Gaborone
OLD_FILES+=usr/share/zoneinfo/Africa/Harare
OLD_FILES+=usr/share/zoneinfo/Africa/Johannesburg
OLD_FILES+=usr/share/zoneinfo/Africa/Juba
OLD_FILES+=usr/share/zoneinfo/Africa/Kampala
OLD_FILES+=usr/share/zoneinfo/Africa/Khartoum
OLD_FILES+=usr/share/zoneinfo/Africa/Kigali
OLD_FILES+=usr/share/zoneinfo/Africa/Kinshasa
OLD_FILES+=usr/share/zoneinfo/Africa/Lagos
OLD_FILES+=usr/share/zoneinfo/Africa/Libreville
OLD_FILES+=usr/share/zoneinfo/Africa/Lome
OLD_FILES+=usr/share/zoneinfo/Africa/Luanda
OLD_FILES+=usr/share/zoneinfo/Africa/Lubumbashi
OLD_FILES+=usr/share/zoneinfo/Africa/Lusaka
OLD_FILES+=usr/share/zoneinfo/Africa/Malabo
OLD_FILES+=usr/share/zoneinfo/Africa/Maputo
OLD_FILES+=usr/share/zoneinfo/Africa/Maseru
OLD_FILES+=usr/share/zoneinfo/Africa/Mbabane
OLD_FILES+=usr/share/zoneinfo/Africa/Mogadishu
OLD_FILES+=usr/share/zoneinfo/Africa/Monrovia
OLD_FILES+=usr/share/zoneinfo/Africa/Nairobi
OLD_FILES+=usr/share/zoneinfo/Africa/Ndjamena
OLD_FILES+=usr/share/zoneinfo/Africa/Niamey
OLD_FILES+=usr/share/zoneinfo/Africa/Nouakchott
OLD_FILES+=usr/share/zoneinfo/Africa/Ouagadougou
OLD_FILES+=usr/share/zoneinfo/Africa/Porto-Novo
OLD_FILES+=usr/share/zoneinfo/Africa/Sao_Tome
OLD_FILES+=usr/share/zoneinfo/Africa/Tripoli
OLD_FILES+=usr/share/zoneinfo/Africa/Tunis
OLD_FILES+=usr/share/zoneinfo/Africa/Windhoek
OLD_FILES+=usr/share/zoneinfo/America/Adak
OLD_FILES+=usr/share/zoneinfo/America/Anchorage
OLD_FILES+=usr/share/zoneinfo/America/Anguilla
OLD_FILES+=usr/share/zoneinfo/America/Antigua
OLD_FILES+=usr/share/zoneinfo/America/Araguaina
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Buenos_Aires
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Catamarca
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Cordoba
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Jujuy
OLD_FILES+=usr/share/zoneinfo/America/Argentina/La_Rioja
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Mendoza
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Rio_Gallegos
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Salta
OLD_FILES+=usr/share/zoneinfo/America/Argentina/San_Juan
OLD_FILES+=usr/share/zoneinfo/America/Argentina/San_Luis
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Tucuman
OLD_FILES+=usr/share/zoneinfo/America/Argentina/Ushuaia
OLD_FILES+=usr/share/zoneinfo/America/Aruba
OLD_FILES+=usr/share/zoneinfo/America/Asuncion
OLD_FILES+=usr/share/zoneinfo/America/Atikokan
OLD_FILES+=usr/share/zoneinfo/America/Bahia
OLD_FILES+=usr/share/zoneinfo/America/Bahia_Banderas
OLD_FILES+=usr/share/zoneinfo/America/Barbados
OLD_FILES+=usr/share/zoneinfo/America/Belem
OLD_FILES+=usr/share/zoneinfo/America/Belize
OLD_FILES+=usr/share/zoneinfo/America/Blanc-Sablon
OLD_FILES+=usr/share/zoneinfo/America/Boa_Vista
OLD_FILES+=usr/share/zoneinfo/America/Bogota
OLD_FILES+=usr/share/zoneinfo/America/Boise
OLD_FILES+=usr/share/zoneinfo/America/Cambridge_Bay
OLD_FILES+=usr/share/zoneinfo/America/Campo_Grande
OLD_FILES+=usr/share/zoneinfo/America/Cancun
OLD_FILES+=usr/share/zoneinfo/America/Caracas
OLD_FILES+=usr/share/zoneinfo/America/Cayenne
OLD_FILES+=usr/share/zoneinfo/America/Cayman
OLD_FILES+=usr/share/zoneinfo/America/Chicago
OLD_FILES+=usr/share/zoneinfo/America/Chihuahua
OLD_FILES+=usr/share/zoneinfo/America/Costa_Rica
OLD_FILES+=usr/share/zoneinfo/America/Creston
OLD_FILES+=usr/share/zoneinfo/America/Cuiaba
OLD_FILES+=usr/share/zoneinfo/America/Curacao
OLD_FILES+=usr/share/zoneinfo/America/Danmarkshavn
OLD_FILES+=usr/share/zoneinfo/America/Dawson
OLD_FILES+=usr/share/zoneinfo/America/Dawson_Creek
OLD_FILES+=usr/share/zoneinfo/America/Denver
OLD_FILES+=usr/share/zoneinfo/America/Detroit
OLD_FILES+=usr/share/zoneinfo/America/Dominica
OLD_FILES+=usr/share/zoneinfo/America/Edmonton
OLD_FILES+=usr/share/zoneinfo/America/Eirunepe
OLD_FILES+=usr/share/zoneinfo/America/El_Salvador
OLD_FILES+=usr/share/zoneinfo/America/Fortaleza
OLD_FILES+=usr/share/zoneinfo/America/Glace_Bay
OLD_FILES+=usr/share/zoneinfo/America/Godthab
OLD_FILES+=usr/share/zoneinfo/America/Goose_Bay
OLD_FILES+=usr/share/zoneinfo/America/Grand_Turk
OLD_FILES+=usr/share/zoneinfo/America/Grenada
OLD_FILES+=usr/share/zoneinfo/America/Guadeloupe
OLD_FILES+=usr/share/zoneinfo/America/Guatemala
OLD_FILES+=usr/share/zoneinfo/America/Guayaquil
OLD_FILES+=usr/share/zoneinfo/America/Guyana
OLD_FILES+=usr/share/zoneinfo/America/Halifax
OLD_FILES+=usr/share/zoneinfo/America/Havana
OLD_FILES+=usr/share/zoneinfo/America/Hermosillo
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Indianapolis
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Knox
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Marengo
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Petersburg
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Tell_City
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Vevay
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Vincennes
OLD_FILES+=usr/share/zoneinfo/America/Indiana/Winamac
OLD_FILES+=usr/share/zoneinfo/America/Inuvik
OLD_FILES+=usr/share/zoneinfo/America/Iqaluit
OLD_FILES+=usr/share/zoneinfo/America/Jamaica
OLD_FILES+=usr/share/zoneinfo/America/Juneau
OLD_FILES+=usr/share/zoneinfo/America/Kentucky/Louisville
OLD_FILES+=usr/share/zoneinfo/America/Kentucky/Monticello
OLD_FILES+=usr/share/zoneinfo/America/Kralendijk
OLD_FILES+=usr/share/zoneinfo/America/La_Paz
OLD_FILES+=usr/share/zoneinfo/America/Lima
OLD_FILES+=usr/share/zoneinfo/America/Los_Angeles
OLD_FILES+=usr/share/zoneinfo/America/Lower_Princes
OLD_FILES+=usr/share/zoneinfo/America/Maceio
OLD_FILES+=usr/share/zoneinfo/America/Managua
OLD_FILES+=usr/share/zoneinfo/America/Manaus
OLD_FILES+=usr/share/zoneinfo/America/Marigot
OLD_FILES+=usr/share/zoneinfo/America/Martinique
OLD_FILES+=usr/share/zoneinfo/America/Matamoros
OLD_FILES+=usr/share/zoneinfo/America/Mazatlan
OLD_FILES+=usr/share/zoneinfo/America/Menominee
OLD_FILES+=usr/share/zoneinfo/America/Merida
OLD_FILES+=usr/share/zoneinfo/America/Metlakatla
OLD_FILES+=usr/share/zoneinfo/America/Mexico_City
OLD_FILES+=usr/share/zoneinfo/America/Miquelon
OLD_FILES+=usr/share/zoneinfo/America/Moncton
OLD_FILES+=usr/share/zoneinfo/America/Monterrey
OLD_FILES+=usr/share/zoneinfo/America/Montevideo
OLD_FILES+=usr/share/zoneinfo/America/Montreal
OLD_FILES+=usr/share/zoneinfo/America/Montserrat
OLD_FILES+=usr/share/zoneinfo/America/Nassau
OLD_FILES+=usr/share/zoneinfo/America/New_York
OLD_FILES+=usr/share/zoneinfo/America/Nipigon
OLD_FILES+=usr/share/zoneinfo/America/Nome
OLD_FILES+=usr/share/zoneinfo/America/Noronha
OLD_FILES+=usr/share/zoneinfo/America/North_Dakota/Beulah
OLD_FILES+=usr/share/zoneinfo/America/North_Dakota/Center
OLD_FILES+=usr/share/zoneinfo/America/North_Dakota/New_Salem
OLD_FILES+=usr/share/zoneinfo/America/Ojinaga
OLD_FILES+=usr/share/zoneinfo/America/Panama
OLD_FILES+=usr/share/zoneinfo/America/Pangnirtung
OLD_FILES+=usr/share/zoneinfo/America/Paramaribo
OLD_FILES+=usr/share/zoneinfo/America/Phoenix
OLD_FILES+=usr/share/zoneinfo/America/Port-au-Prince
OLD_FILES+=usr/share/zoneinfo/America/Port_of_Spain
OLD_FILES+=usr/share/zoneinfo/America/Porto_Velho
OLD_FILES+=usr/share/zoneinfo/America/Puerto_Rico
OLD_FILES+=usr/share/zoneinfo/America/Rainy_River
OLD_FILES+=usr/share/zoneinfo/America/Rankin_Inlet
OLD_FILES+=usr/share/zoneinfo/America/Recife
OLD_FILES+=usr/share/zoneinfo/America/Regina
OLD_FILES+=usr/share/zoneinfo/America/Resolute
OLD_FILES+=usr/share/zoneinfo/America/Rio_Branco
OLD_FILES+=usr/share/zoneinfo/America/Santa_Isabel
OLD_FILES+=usr/share/zoneinfo/America/Santarem
OLD_FILES+=usr/share/zoneinfo/America/Santiago
OLD_FILES+=usr/share/zoneinfo/America/Santo_Domingo
OLD_FILES+=usr/share/zoneinfo/America/Sao_Paulo
OLD_FILES+=usr/share/zoneinfo/America/Scoresbysund
OLD_FILES+=usr/share/zoneinfo/America/Sitka
OLD_FILES+=usr/share/zoneinfo/America/St_Barthelemy
OLD_FILES+=usr/share/zoneinfo/America/St_Johns
OLD_FILES+=usr/share/zoneinfo/America/St_Kitts
OLD_FILES+=usr/share/zoneinfo/America/St_Lucia
OLD_FILES+=usr/share/zoneinfo/America/St_Thomas
OLD_FILES+=usr/share/zoneinfo/America/St_Vincent
OLD_FILES+=usr/share/zoneinfo/America/Swift_Current
OLD_FILES+=usr/share/zoneinfo/America/Tegucigalpa
OLD_FILES+=usr/share/zoneinfo/America/Thule
OLD_FILES+=usr/share/zoneinfo/America/Thunder_Bay
OLD_FILES+=usr/share/zoneinfo/America/Tijuana
OLD_FILES+=usr/share/zoneinfo/America/Toronto
OLD_FILES+=usr/share/zoneinfo/America/Tortola
OLD_FILES+=usr/share/zoneinfo/America/Vancouver
OLD_FILES+=usr/share/zoneinfo/America/Whitehorse
OLD_FILES+=usr/share/zoneinfo/America/Winnipeg
OLD_FILES+=usr/share/zoneinfo/America/Yakutat
OLD_FILES+=usr/share/zoneinfo/America/Yellowknife
OLD_FILES+=usr/share/zoneinfo/Antarctica/Casey
OLD_FILES+=usr/share/zoneinfo/Antarctica/Davis
OLD_FILES+=usr/share/zoneinfo/Antarctica/DumontDUrville
OLD_FILES+=usr/share/zoneinfo/Antarctica/Macquarie
OLD_FILES+=usr/share/zoneinfo/Antarctica/Mawson
OLD_FILES+=usr/share/zoneinfo/Antarctica/McMurdo
OLD_FILES+=usr/share/zoneinfo/Antarctica/Palmer
OLD_FILES+=usr/share/zoneinfo/Antarctica/Rothera
OLD_FILES+=usr/share/zoneinfo/Antarctica/Syowa
OLD_FILES+=usr/share/zoneinfo/Antarctica/Troll
OLD_FILES+=usr/share/zoneinfo/Antarctica/Vostok
OLD_FILES+=usr/share/zoneinfo/Arctic/Longyearbyen
OLD_FILES+=usr/share/zoneinfo/Asia/Aden
OLD_FILES+=usr/share/zoneinfo/Asia/Almaty
OLD_FILES+=usr/share/zoneinfo/Asia/Amman
OLD_FILES+=usr/share/zoneinfo/Asia/Anadyr
OLD_FILES+=usr/share/zoneinfo/Asia/Aqtau
OLD_FILES+=usr/share/zoneinfo/Asia/Aqtobe
OLD_FILES+=usr/share/zoneinfo/Asia/Ashgabat
OLD_FILES+=usr/share/zoneinfo/Asia/Baghdad
OLD_FILES+=usr/share/zoneinfo/Asia/Bahrain
OLD_FILES+=usr/share/zoneinfo/Asia/Baku
OLD_FILES+=usr/share/zoneinfo/Asia/Bangkok
OLD_FILES+=usr/share/zoneinfo/Asia/Beirut
OLD_FILES+=usr/share/zoneinfo/Asia/Bishkek
OLD_FILES+=usr/share/zoneinfo/Asia/Brunei
OLD_FILES+=usr/share/zoneinfo/Asia/Chita
OLD_FILES+=usr/share/zoneinfo/Asia/Choibalsan
OLD_FILES+=usr/share/zoneinfo/Asia/Colombo
OLD_FILES+=usr/share/zoneinfo/Asia/Damascus
OLD_FILES+=usr/share/zoneinfo/Asia/Dhaka
OLD_FILES+=usr/share/zoneinfo/Asia/Dili
OLD_FILES+=usr/share/zoneinfo/Asia/Dubai
OLD_FILES+=usr/share/zoneinfo/Asia/Dushanbe
OLD_FILES+=usr/share/zoneinfo/Asia/Gaza
OLD_FILES+=usr/share/zoneinfo/Asia/Hebron
OLD_FILES+=usr/share/zoneinfo/Asia/Ho_Chi_Minh
OLD_FILES+=usr/share/zoneinfo/Asia/Hong_Kong
OLD_FILES+=usr/share/zoneinfo/Asia/Hovd
OLD_FILES+=usr/share/zoneinfo/Asia/Irkutsk
OLD_FILES+=usr/share/zoneinfo/Asia/Istanbul
OLD_FILES+=usr/share/zoneinfo/Asia/Jakarta
OLD_FILES+=usr/share/zoneinfo/Asia/Jayapura
OLD_FILES+=usr/share/zoneinfo/Asia/Jerusalem
OLD_FILES+=usr/share/zoneinfo/Asia/Kabul
OLD_FILES+=usr/share/zoneinfo/Asia/Kamchatka
OLD_FILES+=usr/share/zoneinfo/Asia/Karachi
OLD_FILES+=usr/share/zoneinfo/Asia/Kathmandu
OLD_FILES+=usr/share/zoneinfo/Asia/Khandyga
OLD_FILES+=usr/share/zoneinfo/Asia/Kolkata
OLD_FILES+=usr/share/zoneinfo/Asia/Krasnoyarsk
OLD_FILES+=usr/share/zoneinfo/Asia/Kuala_Lumpur
OLD_FILES+=usr/share/zoneinfo/Asia/Kuching
OLD_FILES+=usr/share/zoneinfo/Asia/Kuwait
OLD_FILES+=usr/share/zoneinfo/Asia/Macau
OLD_FILES+=usr/share/zoneinfo/Asia/Magadan
OLD_FILES+=usr/share/zoneinfo/Asia/Makassar
OLD_FILES+=usr/share/zoneinfo/Asia/Manila
OLD_FILES+=usr/share/zoneinfo/Asia/Muscat
OLD_FILES+=usr/share/zoneinfo/Asia/Nicosia
OLD_FILES+=usr/share/zoneinfo/Asia/Novokuznetsk
OLD_FILES+=usr/share/zoneinfo/Asia/Novosibirsk
OLD_FILES+=usr/share/zoneinfo/Asia/Omsk
OLD_FILES+=usr/share/zoneinfo/Asia/Oral
OLD_FILES+=usr/share/zoneinfo/Asia/Phnom_Penh
OLD_FILES+=usr/share/zoneinfo/Asia/Pontianak
OLD_FILES+=usr/share/zoneinfo/Asia/Pyongyang
OLD_FILES+=usr/share/zoneinfo/Asia/Qatar
OLD_FILES+=usr/share/zoneinfo/Asia/Qyzylorda
OLD_FILES+=usr/share/zoneinfo/Asia/Rangoon
OLD_FILES+=usr/share/zoneinfo/Asia/Riyadh
OLD_FILES+=usr/share/zoneinfo/Asia/Sakhalin
OLD_FILES+=usr/share/zoneinfo/Asia/Samarkand
OLD_FILES+=usr/share/zoneinfo/Asia/Seoul
OLD_FILES+=usr/share/zoneinfo/Asia/Shanghai
OLD_FILES+=usr/share/zoneinfo/Asia/Singapore
OLD_FILES+=usr/share/zoneinfo/Asia/Srednekolymsk
OLD_FILES+=usr/share/zoneinfo/Asia/Taipei
OLD_FILES+=usr/share/zoneinfo/Asia/Tashkent
OLD_FILES+=usr/share/zoneinfo/Asia/Tbilisi
OLD_FILES+=usr/share/zoneinfo/Asia/Tehran
OLD_FILES+=usr/share/zoneinfo/Asia/Thimphu
OLD_FILES+=usr/share/zoneinfo/Asia/Tokyo
OLD_FILES+=usr/share/zoneinfo/Asia/Ulaanbaatar
OLD_FILES+=usr/share/zoneinfo/Asia/Urumqi
OLD_FILES+=usr/share/zoneinfo/Asia/Ust-Nera
OLD_FILES+=usr/share/zoneinfo/Asia/Vientiane
OLD_FILES+=usr/share/zoneinfo/Asia/Vladivostok
OLD_FILES+=usr/share/zoneinfo/Asia/Yakutsk
OLD_FILES+=usr/share/zoneinfo/Asia/Yekaterinburg
OLD_FILES+=usr/share/zoneinfo/Asia/Yerevan
OLD_FILES+=usr/share/zoneinfo/Atlantic/Azores
OLD_FILES+=usr/share/zoneinfo/Atlantic/Bermuda
OLD_FILES+=usr/share/zoneinfo/Atlantic/Canary
OLD_FILES+=usr/share/zoneinfo/Atlantic/Cape_Verde
OLD_FILES+=usr/share/zoneinfo/Atlantic/Faroe
OLD_FILES+=usr/share/zoneinfo/Atlantic/Madeira
OLD_FILES+=usr/share/zoneinfo/Atlantic/Reykjavik
OLD_FILES+=usr/share/zoneinfo/Atlantic/South_Georgia
OLD_FILES+=usr/share/zoneinfo/Atlantic/St_Helena
OLD_FILES+=usr/share/zoneinfo/Atlantic/Stanley
OLD_FILES+=usr/share/zoneinfo/Australia/Adelaide
OLD_FILES+=usr/share/zoneinfo/Australia/Brisbane
OLD_FILES+=usr/share/zoneinfo/Australia/Broken_Hill
OLD_FILES+=usr/share/zoneinfo/Australia/Currie
OLD_FILES+=usr/share/zoneinfo/Australia/Darwin
OLD_FILES+=usr/share/zoneinfo/Australia/Eucla
OLD_FILES+=usr/share/zoneinfo/Australia/Hobart
OLD_FILES+=usr/share/zoneinfo/Australia/Lindeman
OLD_FILES+=usr/share/zoneinfo/Australia/Lord_Howe
OLD_FILES+=usr/share/zoneinfo/Australia/Melbourne
OLD_FILES+=usr/share/zoneinfo/Australia/Perth
OLD_FILES+=usr/share/zoneinfo/Australia/Sydney
OLD_FILES+=usr/share/zoneinfo/CET
OLD_FILES+=usr/share/zoneinfo/CST6CDT
OLD_FILES+=usr/share/zoneinfo/EET
OLD_FILES+=usr/share/zoneinfo/EST
OLD_FILES+=usr/share/zoneinfo/EST5EDT
OLD_FILES+=usr/share/zoneinfo/Etc/GMT
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+0
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+1
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+10
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+11
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+12
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+2
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+3
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+4
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+5
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+6
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+7
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+8
OLD_FILES+=usr/share/zoneinfo/Etc/GMT+9
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-0
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-1
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-10
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-11
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-12
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-13
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-14
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-2
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-3
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-4
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-5
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-6
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-7
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-8
OLD_FILES+=usr/share/zoneinfo/Etc/GMT-9
OLD_FILES+=usr/share/zoneinfo/Etc/GMT0
OLD_FILES+=usr/share/zoneinfo/Etc/Greenwich
OLD_FILES+=usr/share/zoneinfo/Etc/UCT
OLD_FILES+=usr/share/zoneinfo/Etc/UTC
OLD_FILES+=usr/share/zoneinfo/Etc/Universal
OLD_FILES+=usr/share/zoneinfo/Etc/Zulu
OLD_FILES+=usr/share/zoneinfo/Europe/Amsterdam
OLD_FILES+=usr/share/zoneinfo/Europe/Andorra
OLD_FILES+=usr/share/zoneinfo/Europe/Athens
OLD_FILES+=usr/share/zoneinfo/Europe/Belgrade
OLD_FILES+=usr/share/zoneinfo/Europe/Berlin
OLD_FILES+=usr/share/zoneinfo/Europe/Bratislava
OLD_FILES+=usr/share/zoneinfo/Europe/Brussels
OLD_FILES+=usr/share/zoneinfo/Europe/Bucharest
OLD_FILES+=usr/share/zoneinfo/Europe/Budapest
OLD_FILES+=usr/share/zoneinfo/Europe/Busingen
OLD_FILES+=usr/share/zoneinfo/Europe/Chisinau
OLD_FILES+=usr/share/zoneinfo/Europe/Copenhagen
OLD_FILES+=usr/share/zoneinfo/Europe/Dublin
OLD_FILES+=usr/share/zoneinfo/Europe/Gibraltar
OLD_FILES+=usr/share/zoneinfo/Europe/Guernsey
OLD_FILES+=usr/share/zoneinfo/Europe/Helsinki
OLD_FILES+=usr/share/zoneinfo/Europe/Isle_of_Man
OLD_FILES+=usr/share/zoneinfo/Europe/Istanbul
OLD_FILES+=usr/share/zoneinfo/Europe/Jersey
OLD_FILES+=usr/share/zoneinfo/Europe/Kaliningrad
OLD_FILES+=usr/share/zoneinfo/Europe/Kiev
OLD_FILES+=usr/share/zoneinfo/Europe/Lisbon
OLD_FILES+=usr/share/zoneinfo/Europe/Ljubljana
OLD_FILES+=usr/share/zoneinfo/Europe/London
OLD_FILES+=usr/share/zoneinfo/Europe/Luxembourg
OLD_FILES+=usr/share/zoneinfo/Europe/Madrid
OLD_FILES+=usr/share/zoneinfo/Europe/Malta
OLD_FILES+=usr/share/zoneinfo/Europe/Mariehamn
OLD_FILES+=usr/share/zoneinfo/Europe/Minsk
OLD_FILES+=usr/share/zoneinfo/Europe/Monaco
OLD_FILES+=usr/share/zoneinfo/Europe/Moscow
OLD_FILES+=usr/share/zoneinfo/Europe/Nicosia
OLD_FILES+=usr/share/zoneinfo/Europe/Oslo
OLD_FILES+=usr/share/zoneinfo/Europe/Paris
OLD_FILES+=usr/share/zoneinfo/Europe/Podgorica
OLD_FILES+=usr/share/zoneinfo/Europe/Prague
OLD_FILES+=usr/share/zoneinfo/Europe/Riga
OLD_FILES+=usr/share/zoneinfo/Europe/Rome
OLD_FILES+=usr/share/zoneinfo/Europe/Samara
OLD_FILES+=usr/share/zoneinfo/Europe/San_Marino
OLD_FILES+=usr/share/zoneinfo/Europe/Sarajevo
OLD_FILES+=usr/share/zoneinfo/Europe/Simferopol
OLD_FILES+=usr/share/zoneinfo/Europe/Skopje
OLD_FILES+=usr/share/zoneinfo/Europe/Sofia
OLD_FILES+=usr/share/zoneinfo/Europe/Stockholm
OLD_FILES+=usr/share/zoneinfo/Europe/Tallinn
OLD_FILES+=usr/share/zoneinfo/Europe/Tirane
OLD_FILES+=usr/share/zoneinfo/Europe/Uzhgorod
OLD_FILES+=usr/share/zoneinfo/Europe/Vaduz
OLD_FILES+=usr/share/zoneinfo/Europe/Vatican
OLD_FILES+=usr/share/zoneinfo/Europe/Vienna
OLD_FILES+=usr/share/zoneinfo/Europe/Vilnius
OLD_FILES+=usr/share/zoneinfo/Europe/Volgograd
OLD_FILES+=usr/share/zoneinfo/Europe/Warsaw
OLD_FILES+=usr/share/zoneinfo/Europe/Zagreb
OLD_FILES+=usr/share/zoneinfo/Europe/Zaporozhye
OLD_FILES+=usr/share/zoneinfo/Europe/Zurich
OLD_FILES+=usr/share/zoneinfo/Factory
OLD_FILES+=usr/share/zoneinfo/HST
OLD_FILES+=usr/share/zoneinfo/Indian/Antananarivo
OLD_FILES+=usr/share/zoneinfo/Indian/Chagos
OLD_FILES+=usr/share/zoneinfo/Indian/Christmas
OLD_FILES+=usr/share/zoneinfo/Indian/Cocos
OLD_FILES+=usr/share/zoneinfo/Indian/Comoro
OLD_FILES+=usr/share/zoneinfo/Indian/Kerguelen
OLD_FILES+=usr/share/zoneinfo/Indian/Mahe
OLD_FILES+=usr/share/zoneinfo/Indian/Maldives
OLD_FILES+=usr/share/zoneinfo/Indian/Mauritius
OLD_FILES+=usr/share/zoneinfo/Indian/Mayotte
OLD_FILES+=usr/share/zoneinfo/Indian/Reunion
OLD_FILES+=usr/share/zoneinfo/MET
OLD_FILES+=usr/share/zoneinfo/MST
OLD_FILES+=usr/share/zoneinfo/MST7MDT
OLD_FILES+=usr/share/zoneinfo/PST8PDT
OLD_FILES+=usr/share/zoneinfo/Pacific/Apia
OLD_FILES+=usr/share/zoneinfo/Pacific/Auckland
OLD_FILES+=usr/share/zoneinfo/Pacific/Bougainville
OLD_FILES+=usr/share/zoneinfo/Pacific/Chatham
OLD_FILES+=usr/share/zoneinfo/Pacific/Chuuk
OLD_FILES+=usr/share/zoneinfo/Pacific/Easter
OLD_FILES+=usr/share/zoneinfo/Pacific/Efate
OLD_FILES+=usr/share/zoneinfo/Pacific/Enderbury
OLD_FILES+=usr/share/zoneinfo/Pacific/Fakaofo
OLD_FILES+=usr/share/zoneinfo/Pacific/Fiji
OLD_FILES+=usr/share/zoneinfo/Pacific/Funafuti
OLD_FILES+=usr/share/zoneinfo/Pacific/Galapagos
OLD_FILES+=usr/share/zoneinfo/Pacific/Gambier
OLD_FILES+=usr/share/zoneinfo/Pacific/Guadalcanal
OLD_FILES+=usr/share/zoneinfo/Pacific/Guam
OLD_FILES+=usr/share/zoneinfo/Pacific/Honolulu
OLD_FILES+=usr/share/zoneinfo/Pacific/Johnston
OLD_FILES+=usr/share/zoneinfo/Pacific/Kiritimati
OLD_FILES+=usr/share/zoneinfo/Pacific/Kosrae
OLD_FILES+=usr/share/zoneinfo/Pacific/Kwajalein
OLD_FILES+=usr/share/zoneinfo/Pacific/Majuro
OLD_FILES+=usr/share/zoneinfo/Pacific/Marquesas
OLD_FILES+=usr/share/zoneinfo/Pacific/Midway
OLD_FILES+=usr/share/zoneinfo/Pacific/Nauru
OLD_FILES+=usr/share/zoneinfo/Pacific/Niue
OLD_FILES+=usr/share/zoneinfo/Pacific/Norfolk
OLD_FILES+=usr/share/zoneinfo/Pacific/Noumea
OLD_FILES+=usr/share/zoneinfo/Pacific/Pago_Pago
OLD_FILES+=usr/share/zoneinfo/Pacific/Palau
OLD_FILES+=usr/share/zoneinfo/Pacific/Pitcairn
OLD_FILES+=usr/share/zoneinfo/Pacific/Pohnpei
OLD_FILES+=usr/share/zoneinfo/Pacific/Port_Moresby
OLD_FILES+=usr/share/zoneinfo/Pacific/Rarotonga
OLD_FILES+=usr/share/zoneinfo/Pacific/Saipan
OLD_FILES+=usr/share/zoneinfo/Pacific/Tahiti
OLD_FILES+=usr/share/zoneinfo/Pacific/Tarawa
OLD_FILES+=usr/share/zoneinfo/Pacific/Tongatapu
OLD_FILES+=usr/share/zoneinfo/Pacific/Wake
OLD_FILES+=usr/share/zoneinfo/Pacific/Wallis
OLD_FILES+=usr/share/zoneinfo/UTC
OLD_FILES+=usr/share/zoneinfo/WET
OLD_FILES+=usr/share/zoneinfo/posixrules
OLD_FILES+=usr/share/zoneinfo/zone.tab
.endif
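# Illustrative usage sketch (assumes a checked-out source tree at /usr/src):
# after disabling a component, the entries listed above can be reported and
# removed with the standard top-level targets, e.g.:
# cd /usr/src
# make check-old        # list obsolete files, libraries and directories
# make delete-old       # remove obsolete files and directories
# make delete-old-libs  # remove obsolete libraries once consumers are rebuilt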
